From 95cd7aa5442ef884d1ecf666a8d3c98d21f26a8c Mon Sep 17 00:00:00 2001 From: j-rafique Date: Thu, 27 Mar 2025 17:40:04 +0500 Subject: [PATCH] implement sn processing for cascade action --- .gitignore | 1 + gen/supernode/action/.gitkeep | 0 gen/supernode/action/cascade/service.pb.go | 347 ++++++++++ .../action/cascade/service_grpc.pb.go | 154 +++++ gen/supernode/nft/.gitkeep | 0 go.mod | 5 +- go.sum | 7 + pkg/common/blocktracker/block_tracker.go | 121 ++++ pkg/common/blocktracker/block_tracker_test.go | 97 +++ pkg/common/task/action.go | 20 + pkg/common/task/state/state.go | 174 +++++ pkg/common/task/state/status.go | 34 + pkg/common/task/task.go | 143 ++++ pkg/common/task/ticket.go | 13 + pkg/common/task/worker.go | 90 +++ pkg/common/task/worker_test.go | 147 ++++ pkg/configurer/file.go | 58 ++ pkg/configurer/path_darwin.go | 20 + pkg/configurer/path_linux.go | 20 + pkg/configurer/path_windows.go | 34 + pkg/errgroup/errgroup.go | 37 + pkg/logtrace/fields.go | 21 +- pkg/lumera/client.go | 2 +- pkg/lumera/config.go | 5 + pkg/lumera/interface.go | 2 +- pkg/lumera/lumera_mock.go | 108 +++ pkg/lumera/modules/action/action_mock.go | 66 ++ pkg/lumera/modules/action/interface.go | 1 + pkg/lumera/modules/node/impl.go | 50 +- pkg/lumera/modules/node/interface.go | 12 +- pkg/lumera/modules/node/node_mock.go | 155 +++++ pkg/lumera/modules/supernode/impl.go | 34 + pkg/lumera/modules/supernode/interface.go | 2 + .../modules/supernode/supernode_mock.go | 81 +++ pkg/lumera/modules/tx/interface.go | 1 + pkg/lumera/modules/tx/tx_mock.go | 81 +++ pkg/lumera/options.go | 9 + pkg/raptorq/config.go | 2 + pkg/raptorq/connection.go | 6 +- pkg/raptorq/gen_rq_identifier_files.go | 126 ++++ pkg/raptorq/helper.go | 183 +++++ pkg/raptorq/interfaces.go | 9 +- pkg/raptorq/raptorq.go | 3 +- pkg/raptorq/rq_mock.go | 25 +- pkg/raptorq/rq_server_client.go | 27 +- pkg/raptorq/valdate_rqids.go | 45 ++ pkg/storage/file_storage_interface.go | 45 ++ pkg/storage/files/file.go | 382 +++++++++++ 
pkg/storage/files/format.go | 36 + pkg/storage/files/storage.go | 82 +++ pkg/storage/files/storage_test.go | 37 + pkg/storage/files/thumbnail.go | 9 + pkg/storage/fs/file.go | 87 +++ pkg/storage/fs/file_test.go | 168 +++++ pkg/storage/queries/health_check.go | 430 ++++++++++++ pkg/storage/queries/local.go | 16 + pkg/storage/queries/ping_history.go | 294 ++++++++ pkg/storage/queries/self_healing.go | 644 ++++++++++++++++++ pkg/storage/queries/sqlite.go | 413 +++++++++++ pkg/storage/queries/storage_challenge.go | 493 ++++++++++++++ pkg/storage/queries/task_history.go | 70 ++ pkg/storage/rqstore/store.go | 2 +- pkg/testutil/lumera.go | 14 + pkg/types/healthcheck_challenge.go | 164 +++++ pkg/types/self_healing.go | 252 +++++++ pkg/types/storage_challenge.go | 249 +++++++ pkg/types/ticket.go | 89 +++ pkg/types/types.go | 120 ++++ proto/proto.go | 6 + proto/supernode/action/.gitkeep | 0 proto/supernode/action/cascade/service.proto | 32 + supernode/cmd/service.go | 33 + supernode/cmd/start.go | 108 ++- supernode/cmd/supernode.go | 18 +- supernode/config.yml | 8 +- supernode/config/config.go | 15 + .../server/cascade/cascade_action_server.go | 20 + .../node/action/server/cascade/session.go | 73 ++ .../cascade/upload_cascade_action_input.go | 40 ++ supernode/node/common/register_cascade.go | 51 ++ supernode/node/supernode/client/client.go | 50 ++ supernode/node/supernode/client/connection.go | 21 + supernode/node/supernode/client/session.go | 13 + .../node/supernode/node_client_interface.go | 32 + .../server/common/register_cascade.go | 56 ++ supernode/node/supernode/server/config.go | 20 + supernode/node/supernode/server/server.go | 141 ++++ supernode/services/cascade/config.go | 28 + supernode/services/cascade/service.go | 76 +++ supernode/services/cascade/task.go | 64 ++ supernode/services/cascade/upload.go | 170 +++++ supernode/services/common/config.go | 19 + supernode/services/common/network_handler.go | 256 +++++++ supernode/services/common/node_peer.go | 82 +++ 
supernode/services/common/p2p.go | 21 + supernode/services/common/reg_task_helper.go | 140 ++++ supernode/services/common/service.go | 72 ++ supernode/services/common/status.go | 124 ++++ supernode/services/common/status_test.go | 350 ++++++++++ supernode/services/common/storage_handler.go | 374 ++++++++++ supernode/services/common/supernode_task.go | 61 ++ tests/system/cascade_test.go | 382 +++++++++++ tests/system/go.mod | 9 +- tests/system/go.sum | 119 +++- tests/system/supernode.go | 122 ---- tests/system/supernode_sytem_test.go | 67 -- tests/system/system.go | 2 +- 107 files changed, 9683 insertions(+), 266 deletions(-) delete mode 100644 gen/supernode/action/.gitkeep create mode 100644 gen/supernode/action/cascade/service.pb.go create mode 100644 gen/supernode/action/cascade/service_grpc.pb.go delete mode 100644 gen/supernode/nft/.gitkeep create mode 100644 pkg/common/blocktracker/block_tracker.go create mode 100644 pkg/common/blocktracker/block_tracker_test.go create mode 100644 pkg/common/task/action.go create mode 100644 pkg/common/task/state/state.go create mode 100644 pkg/common/task/state/status.go create mode 100644 pkg/common/task/task.go create mode 100644 pkg/common/task/ticket.go create mode 100644 pkg/common/task/worker.go create mode 100644 pkg/common/task/worker_test.go create mode 100644 pkg/configurer/file.go create mode 100644 pkg/configurer/path_darwin.go create mode 100644 pkg/configurer/path_linux.go create mode 100644 pkg/configurer/path_windows.go create mode 100644 pkg/errgroup/errgroup.go create mode 100644 pkg/lumera/lumera_mock.go create mode 100644 pkg/lumera/modules/action/action_mock.go create mode 100644 pkg/lumera/modules/node/node_mock.go create mode 100644 pkg/lumera/modules/supernode/supernode_mock.go create mode 100644 pkg/lumera/modules/tx/tx_mock.go create mode 100644 pkg/raptorq/gen_rq_identifier_files.go create mode 100644 pkg/raptorq/helper.go create mode 100644 pkg/raptorq/valdate_rqids.go create mode 100644 
pkg/storage/file_storage_interface.go create mode 100644 pkg/storage/files/file.go create mode 100644 pkg/storage/files/format.go create mode 100644 pkg/storage/files/storage.go create mode 100644 pkg/storage/files/storage_test.go create mode 100644 pkg/storage/files/thumbnail.go create mode 100644 pkg/storage/fs/file.go create mode 100644 pkg/storage/fs/file_test.go create mode 100644 pkg/storage/queries/health_check.go create mode 100644 pkg/storage/queries/local.go create mode 100644 pkg/storage/queries/ping_history.go create mode 100644 pkg/storage/queries/self_healing.go create mode 100644 pkg/storage/queries/sqlite.go create mode 100644 pkg/storage/queries/storage_challenge.go create mode 100644 pkg/storage/queries/task_history.go create mode 100644 pkg/types/healthcheck_challenge.go create mode 100644 pkg/types/self_healing.go create mode 100644 pkg/types/storage_challenge.go create mode 100644 pkg/types/ticket.go create mode 100644 pkg/types/types.go create mode 100644 proto/proto.go delete mode 100644 proto/supernode/action/.gitkeep create mode 100644 proto/supernode/action/cascade/service.proto create mode 100644 supernode/cmd/service.go create mode 100644 supernode/node/action/server/cascade/cascade_action_server.go create mode 100644 supernode/node/action/server/cascade/session.go create mode 100644 supernode/node/action/server/cascade/upload_cascade_action_input.go create mode 100644 supernode/node/common/register_cascade.go create mode 100644 supernode/node/supernode/client/client.go create mode 100644 supernode/node/supernode/client/connection.go create mode 100644 supernode/node/supernode/client/session.go create mode 100644 supernode/node/supernode/node_client_interface.go create mode 100644 supernode/node/supernode/server/common/register_cascade.go create mode 100644 supernode/node/supernode/server/config.go create mode 100644 supernode/node/supernode/server/server.go create mode 100644 supernode/services/cascade/config.go create mode 100644 
supernode/services/cascade/service.go create mode 100644 supernode/services/cascade/task.go create mode 100644 supernode/services/cascade/upload.go create mode 100644 supernode/services/common/config.go create mode 100644 supernode/services/common/network_handler.go create mode 100644 supernode/services/common/node_peer.go create mode 100644 supernode/services/common/p2p.go create mode 100644 supernode/services/common/reg_task_helper.go create mode 100644 supernode/services/common/service.go create mode 100644 supernode/services/common/status.go create mode 100644 supernode/services/common/status_test.go create mode 100644 supernode/services/common/storage_handler.go create mode 100644 supernode/services/common/supernode_task.go create mode 100644 tests/system/cascade_test.go delete mode 100644 tests/system/supernode.go delete mode 100644 tests/system/supernode_sytem_test.go diff --git a/.gitignore b/.gitignore index fbed8138..18e02713 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,4 @@ go.work.sum # env file .env /data +/tests/system/data diff --git a/gen/supernode/action/.gitkeep b/gen/supernode/action/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/gen/supernode/action/cascade/service.pb.go b/gen/supernode/action/cascade/service.pb.go new file mode 100644 index 00000000..b5105f4e --- /dev/null +++ b/gen/supernode/action/cascade/service.pb.go @@ -0,0 +1,347 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v5.29.3 +// source: proto/supernode/action/cascade/service.proto + +package cascade + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type UploadInputDataRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` + ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` + DataHash string `protobuf:"bytes,3,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + RqMax int32 `protobuf:"varint,4,opt,name=rq_max,json=rqMax,proto3" json:"rq_max,omitempty"` + SignedData string `protobuf:"bytes,5,opt,name=signed_data,json=signedData,proto3" json:"signed_data,omitempty"` + Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UploadInputDataRequest) Reset() { + *x = UploadInputDataRequest{} + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UploadInputDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UploadInputDataRequest) ProtoMessage() {} + +func (x *UploadInputDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UploadInputDataRequest.ProtoReflect.Descriptor instead. 
+func (*UploadInputDataRequest) Descriptor() ([]byte, []int) { + return file_proto_supernode_action_cascade_service_proto_rawDescGZIP(), []int{0} +} + +func (x *UploadInputDataRequest) GetFilename() string { + if x != nil { + return x.Filename + } + return "" +} + +func (x *UploadInputDataRequest) GetActionId() string { + if x != nil { + return x.ActionId + } + return "" +} + +func (x *UploadInputDataRequest) GetDataHash() string { + if x != nil { + return x.DataHash + } + return "" +} + +func (x *UploadInputDataRequest) GetRqMax() int32 { + if x != nil { + return x.RqMax + } + return 0 +} + +func (x *UploadInputDataRequest) GetSignedData() string { + if x != nil { + return x.SignedData + } + return "" +} + +func (x *UploadInputDataRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type UploadInputDataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UploadInputDataResponse) Reset() { + *x = UploadInputDataResponse{} + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UploadInputDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UploadInputDataResponse) ProtoMessage() {} + +func (x *UploadInputDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UploadInputDataResponse.ProtoReflect.Descriptor instead. 
+func (*UploadInputDataResponse) Descriptor() ([]byte, []int) { + return file_proto_supernode_action_cascade_service_proto_rawDescGZIP(), []int{1} +} + +func (x *UploadInputDataResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *UploadInputDataResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type SessionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + IsPrimary bool `protobuf:"varint,1,opt,name=is_primary,json=isPrimary,proto3" json:"is_primary,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SessionRequest) Reset() { + *x = SessionRequest{} + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SessionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionRequest) ProtoMessage() {} + +func (x *SessionRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionRequest.ProtoReflect.Descriptor instead. 
+func (*SessionRequest) Descriptor() ([]byte, []int) { + return file_proto_supernode_action_cascade_service_proto_rawDescGZIP(), []int{2} +} + +func (x *SessionRequest) GetIsPrimary() bool { + if x != nil { + return x.IsPrimary + } + return false +} + +type SessionReply struct { + state protoimpl.MessageState `protogen:"open.v1"` + SessID string `protobuf:"bytes,1,opt,name=sessID,proto3" json:"sessID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SessionReply) Reset() { + *x = SessionReply{} + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SessionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReply) ProtoMessage() {} + +func (x *SessionReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReply.ProtoReflect.Descriptor instead. 
+func (*SessionReply) Descriptor() ([]byte, []int) { + return file_proto_supernode_action_cascade_service_proto_rawDescGZIP(), []int{3} +} + +func (x *SessionReply) GetSessID() string { + if x != nil { + return x.SessID + } + return "" +} + +var File_proto_supernode_action_cascade_service_proto protoreflect.FileDescriptor + +var file_proto_supernode_action_cascade_service_proto_rawDesc = string([]byte{ + 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, + 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x64, 0x61, 0x74, 0x61, 0x48, 0x61, 0x73, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x71, 0x5f, 0x6d, + 0x61, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x72, 0x71, 0x4d, 0x61, 0x78, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x4d, 0x0a, 0x17, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x44, 
0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x2f, 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x22, 0x26, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x73, 0x73, 0x49, 0x44, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x73, 0x73, 0x49, 0x44, 0x32, 0xa5, 0x01, 0x0a, + 0x0e, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x3d, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x2e, 0x63, 0x61, 0x73, + 0x63, 0x61, 0x64, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x54, + 0x0a, 0x0f, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x1f, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x55, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_proto_supernode_action_cascade_service_proto_rawDescOnce sync.Once + file_proto_supernode_action_cascade_service_proto_rawDescData []byte +) + +func file_proto_supernode_action_cascade_service_proto_rawDescGZIP() []byte { + file_proto_supernode_action_cascade_service_proto_rawDescOnce.Do(func() { + file_proto_supernode_action_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_supernode_action_cascade_service_proto_rawDesc), len(file_proto_supernode_action_cascade_service_proto_rawDesc))) + }) + return file_proto_supernode_action_cascade_service_proto_rawDescData +} + +var file_proto_supernode_action_cascade_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_proto_supernode_action_cascade_service_proto_goTypes = []any{ + (*UploadInputDataRequest)(nil), // 0: cascade.UploadInputDataRequest + (*UploadInputDataResponse)(nil), // 1: cascade.UploadInputDataResponse + (*SessionRequest)(nil), // 2: cascade.SessionRequest + (*SessionReply)(nil), // 3: cascade.SessionReply +} +var file_proto_supernode_action_cascade_service_proto_depIdxs = []int32{ + 2, // 0: cascade.CascadeService.Session:input_type -> cascade.SessionRequest + 0, // 1: cascade.CascadeService.UploadInputData:input_type -> cascade.UploadInputDataRequest + 3, // 2: cascade.CascadeService.Session:output_type -> cascade.SessionReply + 1, // 3: cascade.CascadeService.UploadInputData:output_type -> cascade.UploadInputDataResponse + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the 
sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_proto_supernode_action_cascade_service_proto_init() } +func file_proto_supernode_action_cascade_service_proto_init() { + if File_proto_supernode_action_cascade_service_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_supernode_action_cascade_service_proto_rawDesc), len(file_proto_supernode_action_cascade_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_supernode_action_cascade_service_proto_goTypes, + DependencyIndexes: file_proto_supernode_action_cascade_service_proto_depIdxs, + MessageInfos: file_proto_supernode_action_cascade_service_proto_msgTypes, + }.Build() + File_proto_supernode_action_cascade_service_proto = out.File + file_proto_supernode_action_cascade_service_proto_goTypes = nil + file_proto_supernode_action_cascade_service_proto_depIdxs = nil +} diff --git a/gen/supernode/action/cascade/service_grpc.pb.go b/gen/supernode/action/cascade/service_grpc.pb.go new file mode 100644 index 00000000..fc9bf998 --- /dev/null +++ b/gen/supernode/action/cascade/service_grpc.pb.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.29.3 +// source: proto/supernode/action/cascade/service.proto + +package cascade + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + CascadeService_Session_FullMethodName = "/cascade.CascadeService/Session" + CascadeService_UploadInputData_FullMethodName = "/cascade.CascadeService/UploadInputData" +) + +// CascadeServiceClient is the client API for CascadeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CascadeServiceClient interface { + Session(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SessionRequest, SessionReply], error) + UploadInputData(ctx context.Context, in *UploadInputDataRequest, opts ...grpc.CallOption) (*UploadInputDataResponse, error) +} + +type cascadeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCascadeServiceClient(cc grpc.ClientConnInterface) CascadeServiceClient { + return &cascadeServiceClient{cc} +} + +func (c *cascadeServiceClient) Session(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SessionRequest, SessionReply], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &CascadeService_ServiceDesc.Streams[0], CascadeService_Session_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[SessionRequest, SessionReply]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type CascadeService_SessionClient = grpc.BidiStreamingClient[SessionRequest, SessionReply] + +func (c *cascadeServiceClient) UploadInputData(ctx context.Context, in *UploadInputDataRequest, opts ...grpc.CallOption) (*UploadInputDataResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(UploadInputDataResponse) + err := c.cc.Invoke(ctx, CascadeService_UploadInputData_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CascadeServiceServer is the server API for CascadeService service. +// All implementations must embed UnimplementedCascadeServiceServer +// for forward compatibility. +type CascadeServiceServer interface { + Session(grpc.BidiStreamingServer[SessionRequest, SessionReply]) error + UploadInputData(context.Context, *UploadInputDataRequest) (*UploadInputDataResponse, error) + mustEmbedUnimplementedCascadeServiceServer() +} + +// UnimplementedCascadeServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedCascadeServiceServer struct{} + +func (UnimplementedCascadeServiceServer) Session(grpc.BidiStreamingServer[SessionRequest, SessionReply]) error { + return status.Errorf(codes.Unimplemented, "method Session not implemented") +} +func (UnimplementedCascadeServiceServer) UploadInputData(context.Context, *UploadInputDataRequest) (*UploadInputDataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UploadInputData not implemented") +} +func (UnimplementedCascadeServiceServer) mustEmbedUnimplementedCascadeServiceServer() {} +func (UnimplementedCascadeServiceServer) testEmbeddedByValue() {} + +// UnsafeCascadeServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CascadeServiceServer will +// result in compilation errors. 
+type UnsafeCascadeServiceServer interface { + mustEmbedUnimplementedCascadeServiceServer() +} + +func RegisterCascadeServiceServer(s grpc.ServiceRegistrar, srv CascadeServiceServer) { + // If the following call pancis, it indicates UnimplementedCascadeServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&CascadeService_ServiceDesc, srv) +} + +func _CascadeService_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(CascadeServiceServer).Session(&grpc.GenericServerStream[SessionRequest, SessionReply]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type CascadeService_SessionServer = grpc.BidiStreamingServer[SessionRequest, SessionReply] + +func _CascadeService_UploadInputData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UploadInputDataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CascadeServiceServer).UploadInputData(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CascadeService_UploadInputData_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CascadeServiceServer).UploadInputData(ctx, req.(*UploadInputDataRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// CascadeService_ServiceDesc is the grpc.ServiceDesc for CascadeService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CascadeService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "cascade.CascadeService", + HandlerType: (*CascadeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UploadInputData", + Handler: _CascadeService_UploadInputData_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Session", + Handler: _CascadeService_Session_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "proto/supernode/action/cascade/service.proto", +} diff --git a/gen/supernode/nft/.gitkeep b/gen/supernode/nft/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/go.mod b/go.mod index b7a2a525..c19e7db7 100644 --- a/go.mod +++ b/go.mod @@ -10,17 +10,20 @@ require ( github.com/cosmos/cosmos-sdk v0.50.12 github.com/cosmos/go-bip39 v1.0.0 github.com/cosmos/gogoproto v1.7.0 + github.com/disintegration/imaging v1.6.2 github.com/go-errors/errors v1.5.1 github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 github.com/jmoiron/sqlx v1.4.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 + github.com/kolesa-team/go-webp v1.0.4 github.com/mattn/go-sqlite3 v1.14.24 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.8.1 + github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 github.com/x-cray/logrus-prefixed-formatter v0.5.2 go.uber.org/ratelimit v0.3.1 @@ -146,7 +149,6 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.7.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.19.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect @@ -157,6 +159,7 @@ require ( go.etcd.io/bbolt v1.3.10 // 
indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect + golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d // indirect golang.org/x/net v0.35.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/text v0.22.0 // indirect diff --git a/go.sum b/go.sum index 1c100338..2a421fba 100644 --- a/go.sum +++ b/go.sum @@ -229,6 +229,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= +github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -515,6 +517,8 @@ github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/kolesa-team/go-webp v1.0.4 h1:wQvU4PLG/X7RS0vAeyhiivhLRoxfLVRlDq4I3frdxIQ= +github.com/kolesa-team/go-webp v1.0.4/go.mod h1:oMvdivD6K+Q5qIIkVC2w4k2ZUnI1H+MyP7inwgWq9aA= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -872,6 +876,9 @@ golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0J golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d h1:RNPAfi2nHY7C2srAV8A49jpsYr0ADedCk1wq6fTMTvs= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/pkg/common/blocktracker/block_tracker.go b/pkg/common/blocktracker/block_tracker.go new file mode 100644 index 00000000..13a5c5fd --- /dev/null +++ b/pkg/common/blocktracker/block_tracker.go @@ -0,0 +1,121 @@ +package blocktracker + +import ( + "context" + "sync" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" +) + +const ( + defaultRetries = 3 + defaultDelayDurationBetweenRetries = 5 * time.Second + defaultRPCConnectTimeout = 15 * time.Second + // Update duration in case last update was success + defaultSuccessUpdateDuration = 10 * time.Second + // Update duration in case last update was failed - prevent too much call to Lumera + defaultFailedUpdateDuration = 5 * 
time.Second + defaultNextBlockTimeout = 30 * time.Minute +) + +// LumeraClient defines interface functions BlockCntTracker expects from Lumera +type LumeraClient interface { + // GetBlockCount returns block height of blockchain + GetBlockCount(ctx context.Context) (int32, error) +} + +// BlockCntTracker defines a block tracker - that will keep current block height +type BlockCntTracker struct { + mtx sync.Mutex + LumeraClient LumeraClient + curBlockCnt int32 + lastSuccess time.Time + lastRetried time.Time + lastErr error + delayBetweenRetries time.Duration + retries int +} + +// New returns an instance of BlockCntTracker +func New(LumeraClient LumeraClient) *BlockCntTracker { + return &BlockCntTracker{ + LumeraClient: LumeraClient, + curBlockCnt: 0, + delayBetweenRetries: defaultDelayDurationBetweenRetries, + retries: defaultRetries, + } +} + +func (tracker *BlockCntTracker) refreshBlockCount(retries int) { + tracker.lastRetried = time.Now().UTC() + for i := 0; i < retries; i = i + 1 { + ctx, cancel := context.WithTimeout(context.Background(), defaultRPCConnectTimeout) + blockCnt, err := tracker.LumeraClient.GetBlockCount(ctx) + if err == nil { + tracker.curBlockCnt = blockCnt + tracker.lastSuccess = time.Now().UTC() + cancel() + tracker.lastErr = nil + return + } + cancel() + + tracker.lastErr = err + // delay between retries + time.Sleep(tracker.delayBetweenRetries) + } + +} + +// GetBlockCount returns the current block count +// it will return the cached value if the time since the last refresh is smaller than defaultSuccessUpdateDuration +// or will refresh it by calling the Lumera daemon to get the latest one if defaultSuccessUpdateDuration has expired +func (tracker *BlockCntTracker) GetBlockCount() (int32, error) { + tracker.mtx.Lock() + defer tracker.mtx.Unlock() + + shouldRefresh := false + + if tracker.lastSuccess.After(tracker.lastRetried) { + if time.Now().UTC().After(tracker.lastSuccess.Add(defaultSuccessUpdateDuration)) { + shouldRefresh = true + } + } else { + // prevent update too much + 
if time.Now().UTC().After(tracker.lastRetried.Add(defaultFailedUpdateDuration)) { + shouldRefresh = true + } + } + + if shouldRefresh { + tracker.refreshBlockCount(tracker.retries) + } + + if tracker.curBlockCnt == 0 { + return 0, errors.Errorf("failed to get blockcount: %w", tracker.lastErr) + } + + return tracker.curBlockCnt, nil +} + +// WaitTillNextBlock will wait until next block height is greater than blockCnt +func (tracker *BlockCntTracker) WaitTillNextBlock(ctx context.Context, blockCnt int32) error { + for { + select { + case <-ctx.Done(): + return errors.Errorf("context done: %w", ctx.Err()) + case <-time.After(defaultNextBlockTimeout): + return errors.Errorf("timeout waiting for next block") + case <-time.After(defaultSuccessUpdateDuration): + curBlockCnt, err := tracker.GetBlockCount() + if err != nil { + return errors.Errorf("failed to get blockcount: %w", err) + } + + if curBlockCnt > blockCnt { + return nil + } + } + } +} diff --git a/pkg/common/blocktracker/block_tracker_test.go b/pkg/common/blocktracker/block_tracker_test.go new file mode 100644 index 00000000..b070a4b7 --- /dev/null +++ b/pkg/common/blocktracker/block_tracker_test.go @@ -0,0 +1,97 @@ +package blocktracker + +import ( + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type fakePastelClient struct { + retBlockCnt int32 + retErr error +} + +func (fake *fakePastelClient) GetBlockCount(_ context.Context) (int32, error) { + return fake.retBlockCnt, fake.retErr +} + +func TestGetCountFirstTime(t *testing.T) { + tests := []struct { + name string + pastelClient *fakePastelClient + expectErr bool + }{ + { + name: "success", + pastelClient: &fakePastelClient{ + retBlockCnt: 10, + retErr: nil, + }, + expectErr: false, + }, + { + name: "fail", + pastelClient: &fakePastelClient{ + retBlockCnt: 0, + retErr: errors.New("error"), + }, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tracker := 
New(tt.pastelClient) + tracker.retries = 1 + blkCnt, err := tracker.GetBlockCount() + assert.Equal(t, tt.pastelClient.retBlockCnt, blkCnt) + if tt.expectErr { + assert.True(t, strings.Contains(err.Error(), tt.pastelClient.retErr.Error())) + } else { + assert.Nil(t, err) + } + }) + } +} + +func TestGetBlockCountNoRefresh(t *testing.T) { + pastelClient := &fakePastelClient{ + retBlockCnt: 10, + retErr: errors.New("error"), + } + + expectedBlk := int32(1) + tracker := New(pastelClient) + tracker.retries = 1 + tracker.curBlockCnt = expectedBlk + tracker.lastRetried = time.Now().UTC() + tracker.lastSuccess = time.Now().UTC() + + blkCnt, err := tracker.GetBlockCount() + assert.Equal(t, expectedBlk, blkCnt) + + assert.Nil(t, err) +} + +func TestGetBlockCountRefresh(t *testing.T) { + expectedBlk := int32(10) + pastelClient := &fakePastelClient{ + retBlockCnt: expectedBlk, + retErr: nil, + } + + tracker := New(pastelClient) + tracker.retries = 1 + tracker.curBlockCnt = 1 + tracker.lastRetried = time.Now().UTC().Add(-defaultSuccessUpdateDuration) + tracker.lastSuccess = time.Now().UTC().Add(-defaultSuccessUpdateDuration) + + blkCnt, err := tracker.GetBlockCount() + assert.Equal(t, expectedBlk, blkCnt) + + assert.Nil(t, err) +} diff --git a/pkg/common/task/action.go b/pkg/common/task/action.go new file mode 100644 index 00000000..227ebe35 --- /dev/null +++ b/pkg/common/task/action.go @@ -0,0 +1,20 @@ +package task + +import "context" + +// ActionFn represents a function that is run inside a goroutine. +type ActionFn func(ctx context.Context) error + +// Action represents the action of the task. +type Action struct { + fn ActionFn + doneCh chan struct{} +} + +// NewAction returns a new Action instance. 
+func NewAction(fn ActionFn) *Action { + return &Action{ + fn: fn, + doneCh: make(chan struct{}), + } +} diff --git a/pkg/common/task/state/state.go b/pkg/common/task/state/state.go new file mode 100644 index 00000000..37bfe9e2 --- /dev/null +++ b/pkg/common/task/state/state.go @@ -0,0 +1,174 @@ +//go:generate mockery --name=State + +package state + +import ( + "context" + "sync" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage/queries" + "github.com/LumeraProtocol/supernode/pkg/types" +) + +// State represents a state of the task. +type State interface { + // Status returns the current status. + Status() *Status + + // SetStatusNotifyFunc sets a function to be called after the state is updated. + SetStatusNotifyFunc(fn func(status *Status)) + + // RequiredStatus returns an error if the current status doesn't match the given one. + RequiredStatus(subStatus SubStatus) error + + // StatusHistory returns all history from the very beginning. + StatusHistory() []*Status + + // UpdateStatus updates the status of the state by creating a new status with the given `status`. + UpdateStatus(subStatus SubStatus) + + // SubscribeStatus returns a new subscription of the state. 
+ SubscribeStatus() func() <-chan *Status + + //SetStateLog set the wallet node task status log to the state status log + SetStateLog(statusLog types.Fields) + + //InitialiseHistoryDB sets the connection to historyDB + InitialiseHistoryDB(store queries.LocalStoreInterface) +} + +type state struct { + status *Status + history []*Status + + notifyFn func(status *Status) + sync.RWMutex + subsCh []chan *Status + taskID string + statusLog types.Fields + historyDBStore queries.LocalStoreInterface +} + +// Status implements State.Status() +func (state *state) Status() *Status { + return state.status +} + +// SetStatusNotifyFunc implements State.SetStatusNotifyFunc() +func (state *state) SetStatusNotifyFunc(fn func(status *Status)) { + state.notifyFn = fn +} + +// RequiredStatus implements State.RequiredStatus() +func (state *state) RequiredStatus(subStatus SubStatus) error { + if state.status.Is(subStatus) { + return nil + } + return errors.Errorf("required status %q, current %q", subStatus, state.status) +} + +// StatusHistory implements State.StatusHistory() +func (state *state) StatusHistory() []*Status { + state.RLock() + defer state.RUnlock() + + return append(state.history, state.status) +} + +// UpdateStatus implements State.UpdateStatus() +func (state *state) UpdateStatus(subStatus SubStatus) { + state.Lock() + defer state.Unlock() + + status := NewStatus(subStatus) + state.history = append(state.history, state.status) + state.status = status + + history := types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: state.taskID, Status: status.String()} + if state.statusLog.IsValid() { + history.Details = types.NewDetails(status.String(), state.statusLog) + } + + if state.historyDBStore != nil { + if _, err := state.historyDBStore.InsertTaskHistory(history); err != nil { + log.WithError(err).Error("unable to store task status") + } + } else { + store, err := queries.OpenHistoryDB() + if err != nil { + log.WithError(err).Debug("error opening history db") + } + + if 
store != nil { + defer store.CloseHistoryDB(context.Background()) + if _, err := store.InsertTaskHistory(history); err != nil { + log.WithError(err).Debug("unable to store task status") + } + } + } + + if state.notifyFn != nil { + state.notifyFn(status) + } + + for _, subCh := range state.subsCh { + subCh := subCh + go func() { + subCh <- status + }() + } +} + +// SubscribeStatus implements State.SubscribeStatus() +func (state *state) SubscribeStatus() func() <-chan *Status { + state.RLock() + defer state.RUnlock() + + subCh := make(chan *Status) + state.subsCh = append(state.subsCh, subCh) + + for _, status := range append(state.history, state.status) { + status := status + go func() { + subCh <- status + }() + } + + sub := func() <-chan *Status { + return subCh + } + return sub +} + +func (state *state) SetStateLog(statusLog types.Fields) { + state.statusLog = statusLog +} + +func (state *state) InitialiseHistoryDB(storeInterface queries.LocalStoreInterface) { + state.historyDBStore = storeInterface +} + +// New returns a new state instance. +func New(subStatus SubStatus, taskID string) State { + store, err := queries.OpenHistoryDB() + if err != nil { + log.WithError(err).Error("error opening history db") + } + + if store != nil { + defer store.CloseHistoryDB(context.Background()) + + if _, err := store.InsertTaskHistory(types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: taskID, + Status: subStatus.String()}); err != nil { + log.WithError(err).Error("unable to store task status") + } + } + + return &state{ + status: NewStatus(subStatus), + taskID: taskID, + } +} diff --git a/pkg/common/task/state/status.go b/pkg/common/task/state/status.go new file mode 100644 index 00000000..b1b00da6 --- /dev/null +++ b/pkg/common/task/state/status.go @@ -0,0 +1,34 @@ +//go:generate mockery --name=SubStatus + +package state + +import ( + "fmt" + "time" +) + +// SubStatus represents a sub-status that contains a description of the status. 
+type SubStatus interface { + fmt.Stringer + IsFinal() bool + IsFailure() bool +} + +// Status represents a state of the task. +type Status struct { + CreatedAt time.Time + SubStatus +} + +// Is returns true if the current `Status` matches the given `subStatus`. +func (status *Status) Is(subStatus SubStatus) bool { + return status.SubStatus == subStatus +} + +// NewStatus returns a new Status instance. +func NewStatus(subStatus SubStatus) *Status { + return &Status{ + CreatedAt: time.Now().UTC(), + SubStatus: subStatus, + } +} diff --git a/pkg/common/task/task.go b/pkg/common/task/task.go new file mode 100644 index 00000000..88a64add --- /dev/null +++ b/pkg/common/task/task.go @@ -0,0 +1,143 @@ +//go:generate mockery --name=Task + +package task + +import ( + "context" + "sync" + + "github.com/LumeraProtocol/supernode/pkg/common/task/state" + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/random" +) + +// Task represents a worker task. +type Task interface { + state.State + + // ID returns id of the task. + ID() string + + // Run starts the task. + Run(ctx context.Context) error + + // Cancel tells a task to abandon its work. + // Cancel may be called by multiple goroutines simultaneously. + // After the first call, subsequent calls to a Cancel do nothing. + Cancel() + + // Done returns a channel when the task is canceled. + Done() <-chan struct{} + + // RunAction waits for new actions, starts handling each of them in a new goroutine. + RunAction(ctx context.Context) error + + // NewAction creates a new action and passes it for execution. + // It is used when it is necessary to run an action in the context of the `Task` rather than that of the caller. 
+ NewAction(fn ActionFn) <-chan struct{} + + // CloseActionCh closes action ch + CloseActionCh() +} + +type task struct { + state.State + + id string + + actionCh chan *Action + + doneMu sync.Mutex + doneCh chan struct{} + closeOnce sync.Once +} + +// ID implements Task.ID +func (task *task) ID() string { + return task.id +} + +// Run implements Task.Run +func (task *task) Run(_ context.Context) error { + return errors.New("task default run func not implemented") +} + +// Cancel implements Task.Cancel +func (task *task) Cancel() { + task.doneMu.Lock() + defer task.doneMu.Unlock() + + select { + case <-task.Done(): + log.Debugf("task %s cancelled", task.ID()) + return + default: + close(task.doneCh) + } +} + +// Done implements Task.Done +func (task *task) Done() <-chan struct{} { + return task.doneCh +} + +// RunAction implements Task.RunAction +func (task *task) RunAction(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + group, ctx := errgroup.WithContext(ctx) + for { + select { + case <-ctx.Done(): + log.WithContext(ctx).WithField("task", task.ID()).Info("context done") + case <-task.Done(): + log.WithContext(ctx).Infof("task %s done", task.ID()) + cancel() + case action, ok := <-task.actionCh: + if !ok { + log.WithContext(ctx).Info("action channel closed") + return group.Wait() + } + + currAction := action + group.Go(func() error { + defer close(currAction.doneCh) + + return currAction.fn(ctx) + }) + continue + } + break + } + + return group.Wait() +} + +// CloseActionCh safely closes the action channel +func (task *task) CloseActionCh() { + task.closeOnce.Do(func() { + close(task.actionCh) + }) +} + +// NewAction implements Task.NewAction +func (task *task) NewAction(fn ActionFn) <-chan struct{} { + act := NewAction(fn) + task.actionCh <- act + return act.doneCh +} + +// New returns a new task instance. 
+func New(status state.SubStatus) Task { + taskID, _ := random.String(8, random.Base62Chars) + + return &task{ + State: state.New(status, taskID), + id: taskID, + doneCh: make(chan struct{}), + actionCh: make(chan *Action), + } +} diff --git a/pkg/common/task/ticket.go b/pkg/common/task/ticket.go new file mode 100644 index 00000000..561b8f0b --- /dev/null +++ b/pkg/common/task/ticket.go @@ -0,0 +1,13 @@ +package task + +type CascadeTicket struct { + Creator string `json:"creator"` + CreatorSignature []byte `json:"creator_signature"` + DataHash string `json:"data_hash"` + ActionID string `json:"action_id"` + BlockHeight int64 `json:"block_height"` + BlockHash []byte `json:"block_hash"` + RQIDsIC uint32 `json:"rqids_ic"` + RQIDsMax int32 `json:"rqids_max"` + RQIDs []string `json:"rq_ids"` +} diff --git a/pkg/common/task/worker.go b/pkg/common/task/worker.go new file mode 100644 index 00000000..724d74c5 --- /dev/null +++ b/pkg/common/task/worker.go @@ -0,0 +1,90 @@ +package task + +import ( + "context" + "sync" + + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/log" +) + +// Worker represents a pool of the task. +type Worker struct { + sync.Mutex + + tasks []Task + taskCh chan Task +} + +// Tasks returns all tasks. +func (worker *Worker) Tasks() []Task { + return worker.tasks +} + +// Task returns the task by the given id. +func (worker *Worker) Task(taskID string) Task { + worker.Lock() + defer worker.Unlock() + + for _, task := range worker.tasks { + if task.ID() == taskID { + return task + } + } + return nil +} + +// AddTask adds the new task. +func (worker *Worker) AddTask(task Task) { + worker.Lock() + defer worker.Unlock() + + worker.tasks = append(worker.tasks, task) + worker.taskCh <- task +} + +// RemoveTask removes the task. 
+func (worker *Worker) RemoveTask(subTask Task) { + worker.Lock() + defer worker.Unlock() + + for i, task := range worker.tasks { + if task == subTask { + worker.tasks = append(worker.tasks[:i], worker.tasks[i+1:]...) + return + } + } +} + +// Run waits for new tasks, starts handling each of them in a new goroutine. +func (worker *Worker) Run(ctx context.Context) error { + group, _ := errgroup.WithContext(ctx) // Create an error group but ignore the derived context + for { + select { + case <-ctx.Done(): + log.WithContext(ctx).Warn("worker run stopping : %w", ctx.Err()) + return group.Wait() + case t := <-worker.taskCh: // Rename here + currentTask := t // Capture the loop variable + group.Go(func() error { + defer func() { + if r := recover(); r != nil { + log.WithContext(ctx).Errorf("Recovered from panic in common task's worker run: %v", r) + } + + log.WithContext(ctx).WithField("task", currentTask.ID()).Info("Task Removed") + worker.RemoveTask(currentTask) + }() + + return currentTask.Run(ctx) // Use the captured variable + }) + } + } +} + +// NewWorker returns a new Worker instance. 
+func NewWorker() *Worker { + return &Worker{ + taskCh: make(chan Task), + } +} diff --git a/pkg/common/task/worker_test.go b/pkg/common/task/worker_test.go new file mode 100644 index 00000000..4c5f21ac --- /dev/null +++ b/pkg/common/task/worker_test.go @@ -0,0 +1,147 @@ +package task + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWorkerTasks(t *testing.T) { + t.Parallel() + + type fields struct { + tasks []Task + } + tests := []struct { + name string + fields fields + want []Task + }{ + { + name: "retrieve tasks", + fields: fields{ + tasks: []Task{&task{id: "1"}, &task{id: "2"}}, + }, + want: []Task{&task{id: "1"}, &task{id: "2"}}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + worker := &Worker{ + tasks: tt.fields.tasks, + } + assert.Equal(t, tt.want, worker.Tasks()) + }) + } +} + +func TestWorkerTask(t *testing.T) { + t.Parallel() + + type fields struct { + tasks []Task + } + type args struct { + taskID string + } + tests := []struct { + name string + fields fields + args args + want Task + }{ + { + name: "get task with id 1", + fields: fields{ + tasks: []Task{&task{id: "1"}, &task{id: "2"}}, + }, + args: args{"2"}, + want: &task{id: "2"}, + }, + { + name: "get not exist task", + fields: fields{ + tasks: []Task{&task{id: "1"}, &task{id: "2"}}, + }, + args: args{"3"}, + want: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + worker := &Worker{ + tasks: tt.fields.tasks, + } + assert.Equal(t, tt.want, worker.Task(tt.args.taskID)) + }) + } +} + +func TestWorkerAddTask(t *testing.T) { + t.Parallel() + + type args struct { + task Task + } + tests := []struct { + name string + args args + want []Task + }{ + { + name: "add task", + args: args{&task{id: "1"}}, + want: []Task{&task{id: "1"}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + worker := &Worker{ + taskCh: make(chan Task), + } + + go func() { + 
worker.AddTask(tt.args.task) + }() + + <-worker.taskCh + tasks := worker.tasks + assert.Equal(t, tt.want, tasks) + + }) + } +} + +func TestWorkerRemoveTask(t *testing.T) { + t.Parallel() + + type args struct { + subTask Task + } + tests := []struct { + name string + args args + want []Task + }{ + { + name: "removed task", + args: args{&task{id: "1"}}, + want: []Task{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + worker := &Worker{ + tasks: []Task{tt.args.subTask}, + } + + worker.RemoveTask(tt.args.subTask) + assert.Equal(t, tt.want, worker.tasks) + }) + } +} diff --git a/pkg/configurer/file.go b/pkg/configurer/file.go new file mode 100644 index 00000000..49fab2bc --- /dev/null +++ b/pkg/configurer/file.go @@ -0,0 +1,58 @@ +package configurer + +import ( + "path/filepath" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/spf13/viper" +) + +// SetDefaultConfigPaths sets default paths for Viper to search for the config file in. +func SetDefaultConfigPaths(paths ...string) { + defaultConfigPaths = paths +} + +// ParseFile parses the config file from the given path `filename`, and assign it to the struct `config`. +func ParseFile(filename string, config interface{}) error { + var configType string + + switch filepath.Ext(filename) { + case ".conf": + configType = "env" + } + + return parseFile(filename, configType, config) +} + +// ParseJSONFile parses json config file from the given path `filename`, and assign it to the struct `config`. 
+func ParseJSONFile(filename string, config interface{}) error { + return parseFile(filename, "json", config) +} + +func parseFile(filename, configType string, config interface{}) error { + conf := viper.New() + + for _, configPath := range defaultConfigPaths { + conf.AddConfigPath(filepath.FromSlash(configPath)) + } + + if dir, _ := filepath.Split(filename); dir != "" { + conf.SetConfigFile(filename) + } else { + conf.SetConfigName(filename) + } + + if configType != "" { + conf.SetConfigType(configType) + } + + if err := conf.ReadInConfig(); err != nil { + return errors.Errorf("could not read config file: %w", err) + } + + if err := conf.Unmarshal(&config); err != nil { + return errors.Errorf("unable to decode into struct, %w", err) + } + + return nil +} diff --git a/pkg/configurer/path_darwin.go b/pkg/configurer/path_darwin.go new file mode 100644 index 00000000..c46d6f0d --- /dev/null +++ b/pkg/configurer/path_darwin.go @@ -0,0 +1,20 @@ +//go:build darwin +// +build darwin + +package configurer + +import ( + "os" + "path/filepath" +) + +var defaultConfigPaths = []string{ + "$HOME/Library/Application Support/Lumera", + ".", +} + +// DefaultPath returns the default config path for darwin OS. +func DefaultPath() string { + homeDir, _ := os.UserConfigDir() + return filepath.Join(homeDir, "Lumera") +} diff --git a/pkg/configurer/path_linux.go b/pkg/configurer/path_linux.go new file mode 100644 index 00000000..4c5782d1 --- /dev/null +++ b/pkg/configurer/path_linux.go @@ -0,0 +1,20 @@ +//go:build linux +// +build linux + +package configurer + +import ( + "os" + "path/filepath" +) + +var defaultConfigPaths = []string{ + "$HOME/.lumera", + ".", +} + +// DefaultPath returns the default config path for Linux OS. 
+func DefaultPath() string { + homeDir, _ := os.UserHomeDir() + return filepath.Join(homeDir, ".lumera") +} diff --git a/pkg/configurer/path_windows.go b/pkg/configurer/path_windows.go new file mode 100644 index 00000000..f9dd27ce --- /dev/null +++ b/pkg/configurer/path_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +package configurer + +import ( + "os" + "path" + "path/filepath" + "syscall" +) + +const ( + beforeVistaAppDir = "Application Data" + sinceVistaAppDir = "AppData/Roaming" +) + +var defaultConfigPaths = []string{ + path.Join("$HOME", beforeVistaAppDir, "Lumera"), + path.Join("$HOME", sinceVistaAppDir, "Lumera"), + ".", +} + +// DefaultPath returns the default config path for Windows OS. +func DefaultPath() string { + homeDir, _ := os.UserHomeDir() + appDir := beforeVistaAppDir + + v, _ := syscall.GetVersion() + if v&0xff > 5 { + appDir = sinceVistaAppDir + } + return filepath.Join(homeDir, filepath.FromSlash(appDir), "Lumera") +} diff --git a/pkg/errgroup/errgroup.go b/pkg/errgroup/errgroup.go new file mode 100644 index 00000000..ca2b3fe8 --- /dev/null +++ b/pkg/errgroup/errgroup.go @@ -0,0 +1,37 @@ +package errgroup + +import ( + "context" + "runtime/debug" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + + "golang.org/x/sync/errgroup" +) + +// A Group is a collection of goroutines working on subtasks that are part of the same overall task. +type Group struct { + *errgroup.Group +} + +// Go calls the given function in a new goroutine and tries to recover from panics. 
+func (group *Group) Go(fn func() error) { + group.Group.Go(func() (err error) { + defer errors.Recover(func(recErr error) { + fields := logtrace.Fields{ + logtrace.FieldError: recErr.Error(), + logtrace.FieldStackTrace: debug.Stack(), + } + logtrace.Error(context.Background(), "errgroup panic", fields) + err = recErr + }) + return fn() + }) +} + +// WithContext returns a new Group and an associated Context derived from ctx. +func WithContext(ctx context.Context) (*Group, context.Context) { + group, ctx := errgroup.WithContext(ctx) + return &Group{group}, ctx +} diff --git a/pkg/logtrace/fields.go b/pkg/logtrace/fields.go index 9833af8c..9b28ac85 100644 --- a/pkg/logtrace/fields.go +++ b/pkg/logtrace/fields.go @@ -4,15 +4,18 @@ package logtrace type Fields map[string]interface{} const ( - FieldCorrelationID = "correlation_id" - FieldMethod = "method" - FieldModule = "module" - FieldError = "error" - FieldStatus = "status" - FieldBlockHeight = "block_height" - FieldLimit = "limit" - FieldSupernodeState = "supernode_state" - FieldRequest = "request" + FieldCorrelationID = "correlation_id" + FieldMethod = "method" + FieldModule = "module" + FieldError = "error" + FieldStatus = "status" + FieldBlockHeight = "block_height" + FieldLimit = "limit" + FieldSupernodeState = "supernode_state" + FieldRequest = "request" + FieldSupernodeAccountAddress = "supernode_account_address" + FieldIsPrimary = "is_primary" + FieldStackTrace = "stack_trace" ValueLumeraSDK = "lumera-sdk" ValueActionSDK = "action-sdk" diff --git a/pkg/lumera/client.go b/pkg/lumera/client.go index edccfb54..a2cc142a 100644 --- a/pkg/lumera/client.go +++ b/pkg/lumera/client.go @@ -53,7 +53,7 @@ func newClient(ctx context.Context, opts ...Option) (Client, error) { return nil, err } - nodeModule, err := node.NewModule(conn.GetConn()) + nodeModule, err := node.NewModule(conn.GetConn(), cfg.keyring) if err != nil { conn.Close() return nil, err diff --git a/pkg/lumera/config.go b/pkg/lumera/config.go index 
6a370bf0..9c9208bc 100644 --- a/pkg/lumera/config.go +++ b/pkg/lumera/config.go @@ -1,5 +1,7 @@ package lumera +import "github.com/cosmos/cosmos-sdk/crypto/keyring" + // Config holds all the configuration needed for the client type Config struct { // GRPCAddr is the gRPC endpoint address @@ -10,6 +12,9 @@ type Config struct { // Timeout is the default request timeout in seconds Timeout int + + // keyring is the keyring conf for the node sign & verify + keyring keyring.Keyring } // DefaultConfig returns a default configuration diff --git a/pkg/lumera/interface.go b/pkg/lumera/interface.go index 1e1d8737..47d38142 100644 --- a/pkg/lumera/interface.go +++ b/pkg/lumera/interface.go @@ -1,3 +1,4 @@ +//go:generate mockgen -destination=lumera_mock.go -package=lumera -source=interface.go package lumera import ( @@ -11,7 +12,6 @@ import ( // Client defines the main interface for interacting with Lumera blockchain type Client interface { - // Module accessors Action() action.Module SuperNode() supernode.Module Tx() tx.Module diff --git a/pkg/lumera/lumera_mock.go b/pkg/lumera/lumera_mock.go new file mode 100644 index 00000000..6e243a23 --- /dev/null +++ b/pkg/lumera/lumera_mock.go @@ -0,0 +1,108 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go + +// Package lumera is a generated GoMock package. +package lumera + +import ( + reflect "reflect" + + action "github.com/LumeraProtocol/supernode/pkg/lumera/modules/action" + node "github.com/LumeraProtocol/supernode/pkg/lumera/modules/node" + supernode "github.com/LumeraProtocol/supernode/pkg/lumera/modules/supernode" + tx "github.com/LumeraProtocol/supernode/pkg/lumera/modules/tx" + gomock "github.com/golang/mock/gomock" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. 
+type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Action mocks base method. +func (m *MockClient) Action() action.Module { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Action") + ret0, _ := ret[0].(action.Module) + return ret0 +} + +// Action indicates an expected call of Action. +func (mr *MockClientMockRecorder) Action() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Action", reflect.TypeOf((*MockClient)(nil).Action)) +} + +// Close mocks base method. +func (m *MockClient) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// Node mocks base method. +func (m *MockClient) Node() node.Module { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Node") + ret0, _ := ret[0].(node.Module) + return ret0 +} + +// Node indicates an expected call of Node. +func (mr *MockClientMockRecorder) Node() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Node", reflect.TypeOf((*MockClient)(nil).Node)) +} + +// SuperNode mocks base method. +func (m *MockClient) SuperNode() supernode.Module { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SuperNode") + ret0, _ := ret[0].(supernode.Module) + return ret0 +} + +// SuperNode indicates an expected call of SuperNode. 
+func (mr *MockClientMockRecorder) SuperNode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SuperNode", reflect.TypeOf((*MockClient)(nil).SuperNode)) +} + +// Tx mocks base method. +func (m *MockClient) Tx() tx.Module { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Tx") + ret0, _ := ret[0].(tx.Module) + return ret0 +} + +// Tx indicates an expected call of Tx. +func (mr *MockClientMockRecorder) Tx() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tx", reflect.TypeOf((*MockClient)(nil).Tx)) +} diff --git a/pkg/lumera/modules/action/action_mock.go b/pkg/lumera/modules/action/action_mock.go new file mode 100644 index 00000000..7f643155 --- /dev/null +++ b/pkg/lumera/modules/action/action_mock.go @@ -0,0 +1,66 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go + +// Package action is a generated GoMock package. +package action + +import ( + context "context" + reflect "reflect" + + types "github.com/LumeraProtocol/lumera/x/action/types" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// GetAction mocks base method. 
+func (m *MockModule) GetAction(ctx context.Context, actionID string) (*types.QueryGetActionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAction", ctx, actionID) + ret0, _ := ret[0].(*types.QueryGetActionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAction indicates an expected call of GetAction. +func (mr *MockModuleMockRecorder) GetAction(ctx, actionID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAction", reflect.TypeOf((*MockModule)(nil).GetAction), ctx, actionID) +} + +// GetActionFee mocks base method. +func (m *MockModule) GetActionFee(ctx context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActionFee", ctx, dataSize) + ret0, _ := ret[0].(*types.QueryGetActionFeeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActionFee indicates an expected call of GetActionFee. +func (mr *MockModuleMockRecorder) GetActionFee(ctx, dataSize interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActionFee", reflect.TypeOf((*MockModule)(nil).GetActionFee), ctx, dataSize) +} diff --git a/pkg/lumera/modules/action/interface.go b/pkg/lumera/modules/action/interface.go index 844d80c3..d084963b 100644 --- a/pkg/lumera/modules/action/interface.go +++ b/pkg/lumera/modules/action/interface.go @@ -1,3 +1,4 @@ +//go:generate mockgen -destination=action_mock.go -package=action -source=interface.go package action import ( diff --git a/pkg/lumera/modules/node/impl.go b/pkg/lumera/modules/node/impl.go index e1d9deea..5d65b3bf 100644 --- a/pkg/lumera/modules/node/impl.go +++ b/pkg/lumera/modules/node/impl.go @@ -5,22 +5,28 @@ import ( "fmt" cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/types" + signingtypes 
"github.com/cosmos/cosmos-sdk/types/tx/signing" + "google.golang.org/grpc" ) // module implements the Module interface type module struct { + kr keyring.Keyring client cmtservice.ServiceClient } // newModule creates a new Node module client -func newModule(conn *grpc.ClientConn) (Module, error) { +func newModule(conn *grpc.ClientConn, keyring keyring.Keyring) (Module, error) { if conn == nil { return nil, fmt.Errorf("connection cannot be nil") } return &module{ client: cmtservice.NewServiceClient(conn), + kr: keyring, }, nil } @@ -87,3 +93,45 @@ func (m *module) GetValidatorSetByHeight(ctx context.Context, height int64) (*cm return resp, nil } + +func (m *module) Sign(snAccAddress string, data []byte) (signature []byte, err error) { + accAddr, err := types.AccAddressFromBech32(snAccAddress) + if err != nil { + return signature, fmt.Errorf("invalid address: %w", err) + } + + _, err = m.kr.KeyByAddress(accAddr) + if err != nil { + return signature, fmt.Errorf("address not found in keyring: %w", err) + } + + signature, _, err = m.kr.SignByAddress(accAddr, data, signingtypes.SignMode_SIGN_MODE_DIRECT) + if err != nil { + return nil, fmt.Errorf("failed to sign data: %w", err) + } + + return signature, nil +} + +func (m *module) Verify(accAddress string, data, signature []byte) (err error) { + addr, err := types.AccAddressFromBech32(accAddress) + if err != nil { + return fmt.Errorf("invalid address: %w", err) + } + + keyInfo, err := m.kr.KeyByAddress(addr) + if err != nil { + return fmt.Errorf("address not found in keyring: %w", err) + } + + pubKey, err := keyInfo.GetPubKey() + if err != nil { + return fmt.Errorf("failed to get public key: %w", err) + } + + if !pubKey.VerifySignature(data, signature) { + return fmt.Errorf("invalid signature") + } + + return nil +} diff --git a/pkg/lumera/modules/node/interface.go b/pkg/lumera/modules/node/interface.go index 0694e2af..d96135e9 100644 --- a/pkg/lumera/modules/node/interface.go +++ b/pkg/lumera/modules/node/interface.go @@ 
-1,9 +1,11 @@ +//go:generate mockgen -destination=node_mock.go -package=node -source=interface.go package node import ( "context" cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" + "github.com/cosmos/cosmos-sdk/crypto/keyring" "google.golang.org/grpc" ) @@ -26,9 +28,15 @@ type Module interface { // GetValidatorSetByHeight gets the validator set at a specific height GetValidatorSetByHeight(ctx context.Context, height int64) (*cmtservice.GetValidatorSetByHeightResponse, error) + + // Sign signs the given bytes with the supernodeAccountAddress and returns the signature + Sign(snAccAddress string, data []byte) (signature []byte, err error) + + // Verify verifies the given bytes with given supernodeAccAddress public key and returns the error + Verify(accAddress string, data, signature []byte) (err error) } // NewModule creates a new Node module client -func NewModule(conn *grpc.ClientConn) (Module, error) { - return newModule(conn) +func NewModule(conn *grpc.ClientConn, kr keyring.Keyring) (Module, error) { + return newModule(conn, kr) } diff --git a/pkg/lumera/modules/node/node_mock.go b/pkg/lumera/modules/node/node_mock.go new file mode 100644 index 00000000..953e65e2 --- /dev/null +++ b/pkg/lumera/modules/node/node_mock.go @@ -0,0 +1,155 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go + +// Package node is a generated GoMock package. +package node + +import ( + context "context" + reflect "reflect" + + cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. 
+func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// GetBlockByHeight mocks base method. +func (m *MockModule) GetBlockByHeight(ctx context.Context, height int64) (*cmtservice.GetBlockByHeightResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockByHeight", ctx, height) + ret0, _ := ret[0].(*cmtservice.GetBlockByHeightResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockByHeight indicates an expected call of GetBlockByHeight. +func (mr *MockModuleMockRecorder) GetBlockByHeight(ctx, height interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByHeight", reflect.TypeOf((*MockModule)(nil).GetBlockByHeight), ctx, height) +} + +// GetLatestBlock mocks base method. +func (m *MockModule) GetLatestBlock(ctx context.Context) (*cmtservice.GetLatestBlockResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLatestBlock", ctx) + ret0, _ := ret[0].(*cmtservice.GetLatestBlockResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLatestBlock indicates an expected call of GetLatestBlock. +func (mr *MockModuleMockRecorder) GetLatestBlock(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestBlock", reflect.TypeOf((*MockModule)(nil).GetLatestBlock), ctx) +} + +// GetLatestValidatorSet mocks base method. 
+func (m *MockModule) GetLatestValidatorSet(ctx context.Context) (*cmtservice.GetLatestValidatorSetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLatestValidatorSet", ctx) + ret0, _ := ret[0].(*cmtservice.GetLatestValidatorSetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLatestValidatorSet indicates an expected call of GetLatestValidatorSet. +func (mr *MockModuleMockRecorder) GetLatestValidatorSet(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestValidatorSet", reflect.TypeOf((*MockModule)(nil).GetLatestValidatorSet), ctx) +} + +// GetNodeInfo mocks base method. +func (m *MockModule) GetNodeInfo(ctx context.Context) (*cmtservice.GetNodeInfoResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNodeInfo", ctx) + ret0, _ := ret[0].(*cmtservice.GetNodeInfoResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNodeInfo indicates an expected call of GetNodeInfo. +func (mr *MockModuleMockRecorder) GetNodeInfo(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeInfo", reflect.TypeOf((*MockModule)(nil).GetNodeInfo), ctx) +} + +// GetSyncing mocks base method. +func (m *MockModule) GetSyncing(ctx context.Context) (*cmtservice.GetSyncingResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSyncing", ctx) + ret0, _ := ret[0].(*cmtservice.GetSyncingResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSyncing indicates an expected call of GetSyncing. +func (mr *MockModuleMockRecorder) GetSyncing(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSyncing", reflect.TypeOf((*MockModule)(nil).GetSyncing), ctx) +} + +// GetValidatorSetByHeight mocks base method. 
+func (m *MockModule) GetValidatorSetByHeight(ctx context.Context, height int64) (*cmtservice.GetValidatorSetByHeightResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValidatorSetByHeight", ctx, height) + ret0, _ := ret[0].(*cmtservice.GetValidatorSetByHeightResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetValidatorSetByHeight indicates an expected call of GetValidatorSetByHeight. +func (mr *MockModuleMockRecorder) GetValidatorSetByHeight(ctx, height interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorSetByHeight", reflect.TypeOf((*MockModule)(nil).GetValidatorSetByHeight), ctx, height) +} + +// Sign mocks base method. +func (m *MockModule) Sign(snAccAddress string, data []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Sign", snAccAddress, data) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Sign indicates an expected call of Sign. +func (mr *MockModuleMockRecorder) Sign(snAccAddress, data interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sign", reflect.TypeOf((*MockModule)(nil).Sign), snAccAddress, data) +} + +// Verify mocks base method. +func (m *MockModule) Verify(accAddress string, data, signature []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Verify", accAddress, data, signature) + ret0, _ := ret[0].(error) + return ret0 +} + +// Verify indicates an expected call of Verify. 
+func (mr *MockModuleMockRecorder) Verify(accAddress, data, signature interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockModule)(nil).Verify), accAddress, data, signature) +} diff --git a/pkg/lumera/modules/supernode/impl.go b/pkg/lumera/modules/supernode/impl.go index b31d52c6..cf274d17 100644 --- a/pkg/lumera/modules/supernode/impl.go +++ b/pkg/lumera/modules/supernode/impl.go @@ -3,8 +3,11 @@ package supernode import ( "context" "fmt" + "sort" "github.com/LumeraProtocol/lumera/x/supernode/types" + "github.com/LumeraProtocol/supernode/pkg/errors" + "google.golang.org/grpc" ) @@ -47,3 +50,34 @@ func (m *module) GetSuperNode(ctx context.Context, address string) (*types.Query return resp, nil } + +func (m *module) GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*types.SuperNode, error) { + resp, err := m.client.GetSuperNodeBySuperNodeAddress(ctx, &types.QueryGetSuperNodeBySuperNodeAddressRequest{ + SupernodeAddress: address, + }) + if err != nil { + fmt.Errorf("failed to get supernode: %w", err) + } + + return resp.Supernode, nil +} + +func Exists(nodes []*types.SuperNode, snAccAddress string) bool { + for _, sn := range nodes { + if sn.SupernodeAccount == snAccAddress { + return true + } + } + return false +} + +func GetLatestIP(supernode *types.SuperNode) (string, error) { + if len(supernode.PrevIpAddresses) == 0 { + return "", errors.Errorf("no ip history exists for the supernode") + } + sort.Slice(supernode.PrevIpAddresses, func(i, j int) bool { + return supernode.PrevIpAddresses[i].GetHeight() > supernode.PrevIpAddresses[j].GetHeight() + }) + + return supernode.PrevIpAddresses[0].Address, nil +} diff --git a/pkg/lumera/modules/supernode/interface.go b/pkg/lumera/modules/supernode/interface.go index 37e57b12..89d398de 100644 --- a/pkg/lumera/modules/supernode/interface.go +++ b/pkg/lumera/modules/supernode/interface.go @@ -1,3 +1,4 @@ +//go:generate 
mockgen -destination=supernode_mock.go -package=supernode -source=interface.go package supernode import ( @@ -11,6 +12,7 @@ import ( type Module interface { GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*types.QueryGetTopSuperNodesForBlockResponse, error) GetSuperNode(ctx context.Context, address string) (*types.QueryGetSuperNodeResponse, error) + GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*types.SuperNode, error) } // NewModule creates a new SuperNode module client diff --git a/pkg/lumera/modules/supernode/supernode_mock.go b/pkg/lumera/modules/supernode/supernode_mock.go new file mode 100644 index 00000000..682043a7 --- /dev/null +++ b/pkg/lumera/modules/supernode/supernode_mock.go @@ -0,0 +1,81 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go + +// Package supernode is a generated GoMock package. +package supernode + +import ( + context "context" + reflect "reflect" + + types "github.com/LumeraProtocol/lumera/x/supernode/types" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// GetSuperNode mocks base method. 
+func (m *MockModule) GetSuperNode(ctx context.Context, address string) (*types.QueryGetSuperNodeResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSuperNode", ctx, address) + ret0, _ := ret[0].(*types.QueryGetSuperNodeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSuperNode indicates an expected call of GetSuperNode. +func (mr *MockModuleMockRecorder) GetSuperNode(ctx, address interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSuperNode", reflect.TypeOf((*MockModule)(nil).GetSuperNode), ctx, address) +} + +// GetSupernodeBySupernodeAddress mocks base method. +func (m *MockModule) GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*types.SuperNode, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSupernodeBySupernodeAddress", ctx, address) + ret0, _ := ret[0].(*types.SuperNode) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSupernodeBySupernodeAddress indicates an expected call of GetSupernodeBySupernodeAddress. +func (mr *MockModuleMockRecorder) GetSupernodeBySupernodeAddress(ctx, address interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupernodeBySupernodeAddress", reflect.TypeOf((*MockModule)(nil).GetSupernodeBySupernodeAddress), ctx, address) +} + +// GetTopSuperNodesForBlock mocks base method. +func (m *MockModule) GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*types.QueryGetTopSuperNodesForBlockResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTopSuperNodesForBlock", ctx, blockHeight) + ret0, _ := ret[0].(*types.QueryGetTopSuperNodesForBlockResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTopSuperNodesForBlock indicates an expected call of GetTopSuperNodesForBlock. 
+func (mr *MockModuleMockRecorder) GetTopSuperNodesForBlock(ctx, blockHeight interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopSuperNodesForBlock", reflect.TypeOf((*MockModule)(nil).GetTopSuperNodesForBlock), ctx, blockHeight) +} diff --git a/pkg/lumera/modules/tx/interface.go b/pkg/lumera/modules/tx/interface.go index b5502c20..26df9ba8 100644 --- a/pkg/lumera/modules/tx/interface.go +++ b/pkg/lumera/modules/tx/interface.go @@ -1,3 +1,4 @@ +//go:generate mockgen -destination=tx_mock.go -package=tx -source=interface.go package tx import ( diff --git a/pkg/lumera/modules/tx/tx_mock.go b/pkg/lumera/modules/tx/tx_mock.go new file mode 100644 index 00000000..353b4150 --- /dev/null +++ b/pkg/lumera/modules/tx/tx_mock.go @@ -0,0 +1,81 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go + +// Package tx is a generated GoMock package. +package tx + +import ( + context "context" + reflect "reflect" + + tx "github.com/cosmos/cosmos-sdk/types/tx" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// BroadcastTx mocks base method. 
+func (m *MockModule) BroadcastTx(ctx context.Context, txBytes []byte, mode tx.BroadcastMode) (*tx.BroadcastTxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BroadcastTx", ctx, txBytes, mode) + ret0, _ := ret[0].(*tx.BroadcastTxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BroadcastTx indicates an expected call of BroadcastTx. +func (mr *MockModuleMockRecorder) BroadcastTx(ctx, txBytes, mode interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BroadcastTx", reflect.TypeOf((*MockModule)(nil).BroadcastTx), ctx, txBytes, mode) +} + +// GetTx mocks base method. +func (m *MockModule) GetTx(ctx context.Context, hash string) (*tx.GetTxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTx", ctx, hash) + ret0, _ := ret[0].(*tx.GetTxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTx indicates an expected call of GetTx. +func (mr *MockModuleMockRecorder) GetTx(ctx, hash interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockModule)(nil).GetTx), ctx, hash) +} + +// SimulateTx mocks base method. +func (m *MockModule) SimulateTx(ctx context.Context, txBytes []byte) (*tx.SimulateResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SimulateTx", ctx, txBytes) + ret0, _ := ret[0].(*tx.SimulateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SimulateTx indicates an expected call of SimulateTx. 
+func (mr *MockModuleMockRecorder) SimulateTx(ctx, txBytes interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateTx", reflect.TypeOf((*MockModule)(nil).SimulateTx), ctx, txBytes) +} diff --git a/pkg/lumera/options.go b/pkg/lumera/options.go index 862194ac..7bc5220e 100644 --- a/pkg/lumera/options.go +++ b/pkg/lumera/options.go @@ -1,5 +1,7 @@ package lumera +import "github.com/cosmos/cosmos-sdk/crypto/keyring" + // Option is a function that applies a change to Config type Option func(*Config) @@ -23,3 +25,10 @@ func WithTimeout(seconds int) Option { c.Timeout = seconds } } + +// WithKeyring sets the keyring conf for the node +func WithKeyring(k keyring.Keyring) Option { + return func(c *Config) { + c.keyring = k + } +} diff --git a/pkg/raptorq/config.go b/pkg/raptorq/config.go index 92203643..ad6d0dd3 100644 --- a/pkg/raptorq/config.go +++ b/pkg/raptorq/config.go @@ -15,6 +15,8 @@ type Config struct { // the queries port to listen for connections on Port int `mapstructure:"port" json:"port,omitempty"` + + RqFilesDir string `mapstructure:"rqfiles_dir" json:"rqfiles_dir,omitempty"` } // NewConfig returns a new Config instance. 
diff --git a/pkg/raptorq/connection.go b/pkg/raptorq/connection.go index dabcdb1f..0b4dcbee 100644 --- a/pkg/raptorq/connection.go +++ b/pkg/raptorq/connection.go @@ -1,6 +1,8 @@ package raptorq import ( + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" "google.golang.org/grpc" ) @@ -11,8 +13,8 @@ type clientConn struct { id string } -func (conn *clientConn) RaptorQ(config *Config) RaptorQ { - return newRaptorQServerClient(conn, config) +func (conn *clientConn) RaptorQ(config *Config, lc lumera.Client, store rqstore.Store) RaptorQ { + return NewRaptorQServerClient(conn, config, lc, store) } func newClientConn(id string, conn *grpc.ClientConn) Connection { diff --git a/pkg/raptorq/gen_rq_identifier_files.go b/pkg/raptorq/gen_rq_identifier_files.go new file mode 100644 index 00000000..7497ccd2 --- /dev/null +++ b/pkg/raptorq/gen_rq_identifier_files.go @@ -0,0 +1,126 @@ +package raptorq + +import ( + "context" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/lumera" +) + +type GenRQIdentifiersFilesRequest struct { + TaskID string + BlockHash string + Data []byte + RqMax uint32 + CreatorSNAddress string + SignedData string + LC lumera.Client +} + +type GenRQIdentifiersFilesResponse struct { + RQIDsIc uint32 + RQIDs []string + RQIDsFiles [][]byte + RQIDsFile []byte + CreatorSignature []byte + RQEncodeParams EncoderParameters +} + +func (s *raptorQServerClient) GenRQIdentifiersFiles(ctx context.Context, req GenRQIdentifiersFilesRequest) ( + GenRQIdentifiersFilesResponse, error) { + + encodeInfo, err := s.encodeInfo(ctx, req.TaskID, req.Data, req.RqMax, req.BlockHash, req.CreatorSNAddress) + if err != nil { + return GenRQIdentifiersFilesResponse{}, errors.Errorf("error encoding info:%s", err.Error()) + } + + var genRQIDsRes generateRQIDsResponse + for i := range encodeInfo.SymbolIDFiles { + if len(encodeInfo.SymbolIDFiles[i].SymbolIdentifiers) == 0 { + return 
GenRQIdentifiersFilesResponse{}, errors.Errorf("empty raw file") + } + + rawRQIDFile := encodeInfo.SymbolIDFiles[i] + + genRQIDsRes, err = s.generateRQIDs(ctx, generateRQIDsRequest{ + req.LC, req.SignedData, rawRQIDFile, req.CreatorSNAddress, req.RqMax, + }) + if err != nil { + return GenRQIdentifiersFilesResponse{}, errors.Errorf("error generating rqids") + } + break + } + + return GenRQIdentifiersFilesResponse{ + RQIDsIc: genRQIDsRes.RQIDsIc, + RQIDs: genRQIDsRes.RQIDs, + RQIDsFiles: genRQIDsRes.RQIDsFiles, + RQIDsFile: genRQIDsRes.RQIDsFile, + RQEncodeParams: encodeInfo.EncoderParam, + CreatorSignature: genRQIDsRes.signature, + }, nil +} + +// // ValidateIDFiles validates received (IDs) file and its (50) IDs: +// // 1. checks signatures +// // 2. generates list of 50 IDs and compares them to received +// func (h *RegTaskHelper) ValidateIDFiles(ctx context.Context, +// data []byte, ic uint32, max uint32, ids []string, numSignRequired int, +// snAccAddresses []string, +// lumeraClient lumera.Client, +// creatorSignaure []byte, +// ) ([]byte, [][]byte, error) { + +// dec, err := utils.B64Decode(data) +// if err != nil { +// return nil, nil, errors.Errorf("decode data: %w", err) +// } + +// decData, err := utils.Decompress(dec) +// if err != nil { +// return nil, nil, errors.Errorf("decompress: %w", err) +// } + +// splits := bytes.Split(decData, []byte{SeparatorByte}) +// if len(splits) != numSignRequired+1 { +// return nil, nil, errors.New("invalid data") +// } + +// file, err := utils.B64Decode(splits[0]) +// if err != nil { +// return nil, nil, errors.Errorf("decode file: %w", err) +// } + +// verifications := 0 +// verifiedNodes := make(map[int]bool) +// for i := 1; i < numSignRequired+1; i++ { +// for j := 0; j < len(snAccAddresses); j++ { +// if _, ok := verifiedNodes[j]; ok { +// continue +// } + +// err := lumeraClient.Node().Verify(snAccAddresses[j], file, creatorSignaure) // TODO : verify the signature +// if err != nil { +// return nil, nil, 
errors.Errorf("verify file signature %w", err) +// } + +// verifiedNodes[j] = true +// verifications++ +// break +// } +// } + +// if verifications != numSignRequired { +// return nil, nil, errors.Errorf("file verification failed: need %d verifications, got %d", numSignRequired, verifications) +// } + +// gotIDs, idFiles, err := raptorq.GetIDFiles(ctx, decData, ic, max) +// if err != nil { +// return nil, nil, errors.Errorf("get ids: %w", err) +// } + +// if err := utils.EqualStrList(gotIDs, ids); err != nil { +// return nil, nil, errors.Errorf("IDs don't match: %w", err) +// } + +// return file, idFiles, nil +// } diff --git a/pkg/raptorq/helper.go b/pkg/raptorq/helper.go new file mode 100644 index 00000000..1924c391 --- /dev/null +++ b/pkg/raptorq/helper.go @@ -0,0 +1,183 @@ +package raptorq + +import ( + "bytes" + "context" + "encoding/json" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/utils" + "github.com/cosmos/btcutil/base58" + "math/rand/v2" + "os" + "strconv" +) + +const ( + InputEncodeFileName = "input.data" + SeparatorByte byte = 46 // separator in dd_and_fingerprints.signature i.e. '.' 
+) + +// EncoderParameters represents the encoding params used by raptorq services +type EncoderParameters struct { + Oti []byte +} + +// EncodeInfo represents the response returns by encodeInfo method +type EncodeInfo struct { + SymbolIDFiles map[string]RawSymbolIDFile + EncoderParam EncoderParameters +} + +// Encode represents the response returns by Encode method +type Encode struct { + Symbols map[string][]byte + EncoderParam EncoderParameters +} + +// Decode represents the response returns by Decode method +type Decode struct { + File []byte +} + +func (s *raptorQServerClient) encodeInfo(ctx context.Context, taskID string, data []byte, copies uint32, blockHash string, pastelID string) (*EncodeInfo, error) { + s.semaphore <- struct{}{} // Acquire slot + defer func() { + <-s.semaphore // Release the semaphore slot + }() + + if data == nil { + return nil, errors.Errorf("invalid data") + } + + _, inputPath, err := createInputEncodeFile(s.config.RqFilesDir, data) + if err != nil { + return nil, errors.Errorf("create input file: %w", err) + } + res, err := s.EncodeMetaData(ctx, EncodeMetadataRequest{ + FilesNumber: copies, + BlockHash: blockHash, + PastelId: pastelID, + Path: inputPath, + }) + if err != nil { + return nil, errors.Errorf("encode metadata %s: %w", res.Path, err) + } + + filesMap, err := scanSymbolIDFiles(res.Path) + if err != nil { + return nil, errors.Errorf("scan symbol id files folder %s: %w", res.Path, err) + } + + if len(filesMap) != int(copies) { + return nil, errors.Errorf("symbol id files count not match: expect %d, output %d", copies, len(filesMap)) + } + + if err := s.store.StoreSymbolDirectory(taskID, res.Path); err != nil { + return nil, errors.Errorf("store symbol directory: %w", err) + } + + output := &EncodeInfo{ + SymbolIDFiles: filesMap, + EncoderParam: EncoderParameters{ + Oti: res.EncoderParameters, + }, + } + + if err := os.Remove(inputPath); err != nil { + logtrace.Error(ctx, "encode info: error removing input file", 
logtrace.Fields{"Path": inputPath}) + } + + return output, nil +} + +type generateRQIDsRequest struct { + lc lumera.Client + signedData string + rawFile RawSymbolIDFile + creatorAddress string + maxFiles uint32 +} + +type generateRQIDsResponse struct { + RQIDsIc uint32 + RQIDs []string + RQIDsFile []byte + RQIDsFiles [][]byte + signature []byte +} + +func (s *raptorQServerClient) generateRQIDs(ctx context.Context, req generateRQIDsRequest) (generateRQIDsResponse, error) { + // RQID file generated by supernode + rqIDsfile, err := json.Marshal(req.rawFile) + if err != nil { + return generateRQIDsResponse{}, errors.Errorf("marshal rqID file") + } + encRqIDsfile := utils.B64Encode(rqIDsfile) + + creatorSignature, err := ValidateRQIDs(req.lc, req.signedData, encRqIDsfile, req.rawFile.SymbolIdentifiers, req.creatorAddress) + if err != nil { + return generateRQIDsResponse{}, errors.Errorf("error validating RQIDs") + } + + var buffer bytes.Buffer + buffer.Write(encRqIDsfile) + buffer.WriteString(".") + buffer.Write(creatorSignature) + rqIDFile := buffer.Bytes() + + RQIDsIc := rand.Uint32() + RQIDs, RQIDsFiles, err := GetIDFiles(ctx, rqIDFile, RQIDsIc, req.maxFiles) + if err != nil { + return generateRQIDsResponse{}, errors.Errorf("get ID Files: %w", err) + } + + comp, err := utils.HighCompress(ctx, rqIDFile) + if err != nil { + return generateRQIDsResponse{}, errors.Errorf("compress: %w", err) + } + RQIDsFile := utils.B64Encode(comp) + + return generateRQIDsResponse{ + RQIDsIc: RQIDsIc, + RQIDs: RQIDs, + RQIDsFile: RQIDsFile, + RQIDsFiles: RQIDsFiles, + signature: creatorSignature, + }, nil +} + +// GetIDFiles generates ID Files for dd_and_fingerprints files and rq_id files +// file is b64 encoded file appended with signatures and compressed, ic is the initial counter +// and max is the number of ids to generate +func GetIDFiles(ctx context.Context, file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { + idFiles := make([][]byte, 0, max) + ids = 
make([]string, 0, max) + var buffer bytes.Buffer + + for i := uint32(0); i < max; i++ { + buffer.Reset() + counter := ic + i + + buffer.Write(file) + buffer.WriteByte(SeparatorByte) + buffer.WriteString(strconv.Itoa(int(counter))) // Using the string representation to maintain backward compatibility + + compressedData, err := utils.HighCompress(ctx, buffer.Bytes()) // Ensure you're using the same compression level + if err != nil { + return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) + } + + idFiles = append(idFiles, compressedData) + + hash, err := utils.Sha3256hash(compressedData) + if err != nil { + return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) + } + + ids = append(ids, base58.Encode(hash)) + } + + return ids, idFiles, nil +} diff --git a/pkg/raptorq/interfaces.go b/pkg/raptorq/interfaces.go index 4993d2e5..654cd45e 100644 --- a/pkg/raptorq/interfaces.go +++ b/pkg/raptorq/interfaces.go @@ -1,9 +1,12 @@ -//go:generate mockgen -destination=rq_mock.go -package=raptorq -source=interface.go +//go:generate mockgen -destination=rq_mock.go -package=raptorq -source=interfaces.go package raptorq import ( "context" + + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" ) // ClientInterface represents a base connection interface. @@ -18,7 +21,7 @@ type Connection interface { Close() error // RaptorQ returns a new RaptorQ stream. - RaptorQ(config *Config) RaptorQ + RaptorQ(config *Config, lc lumera.Client, store rqstore.Store) RaptorQ // FIXME: // Done returns a channel that's closed when connection is shutdown. 
@@ -33,4 +36,6 @@ type RaptorQ interface { Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) // EncodeMetaData Get encode info(include encode parameters + symbol id files) EncodeMetaData(ctx context.Context, req EncodeMetadataRequest) (EncodeResponse, error) + // GenRQIdentifiersFiles generates the RQ identifier files + GenRQIdentifiersFiles(ctx context.Context, req GenRQIdentifiersFilesRequest) (GenRQIdentifiersFilesResponse, error) } diff --git a/pkg/raptorq/raptorq.go b/pkg/raptorq/raptorq.go index 720d693d..0eb38e1c 100644 --- a/pkg/raptorq/raptorq.go +++ b/pkg/raptorq/raptorq.go @@ -1,12 +1,13 @@ package raptorq import ( - "github.com/LumeraProtocol/supernode/pkg/errors" "io/fs" "io/ioutil" "os" "path/filepath" + "github.com/LumeraProtocol/supernode/pkg/errors" + json "github.com/json-iterator/go" "github.com/google/uuid" diff --git a/pkg/raptorq/rq_mock.go b/pkg/raptorq/rq_mock.go index a61faf0c..25e4bd99 100644 --- a/pkg/raptorq/rq_mock.go +++ b/pkg/raptorq/rq_mock.go @@ -8,6 +8,8 @@ import ( context "context" reflect "reflect" + lumera "github.com/LumeraProtocol/supernode/pkg/lumera" + rqstore "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" gomock "github.com/golang/mock/gomock" ) @@ -87,17 +89,17 @@ func (mr *MockConnectionMockRecorder) Close() *gomock.Call { } // RaptorQ mocks base method. -func (m *MockConnection) RaptorQ(config *Config) RaptorQ { +func (m *MockConnection) RaptorQ(config *Config, lc lumera.Client, store rqstore.Store) RaptorQ { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RaptorQ", config) + ret := m.ctrl.Call(m, "RaptorQ", config, lc, store) ret0, _ := ret[0].(RaptorQ) return ret0 } // RaptorQ indicates an expected call of RaptorQ. 
-func (mr *MockConnectionMockRecorder) RaptorQ(config interface{}) *gomock.Call { +func (mr *MockConnectionMockRecorder) RaptorQ(config, lc, store interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaptorQ", reflect.TypeOf((*MockConnection)(nil).RaptorQ), config) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaptorQ", reflect.TypeOf((*MockConnection)(nil).RaptorQ), config, lc, store) } // MockRaptorQ is a mock of RaptorQ interface. @@ -167,3 +169,18 @@ func (mr *MockRaptorQMockRecorder) EncodeMetaData(ctx, req interface{}) *gomock. mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EncodeMetaData", reflect.TypeOf((*MockRaptorQ)(nil).EncodeMetaData), ctx, req) } + +// GenRQIdentifiersFiles mocks base method. +func (m *MockRaptorQ) GenRQIdentifiersFiles(ctx context.Context, req GenRQIdentifiersFilesRequest) (GenRQIdentifiersFilesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GenRQIdentifiersFiles", ctx, req) + ret0, _ := ret[0].(GenRQIdentifiersFilesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GenRQIdentifiersFiles indicates an expected call of GenRQIdentifiersFiles. 
+func (mr *MockRaptorQMockRecorder) GenRQIdentifiersFiles(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenRQIdentifiersFiles", reflect.TypeOf((*MockRaptorQ)(nil).GenRQIdentifiersFiles), ctx, req) +} diff --git a/pkg/raptorq/rq_server_client.go b/pkg/raptorq/rq_server_client.go index 1d0341ab..9acb7749 100644 --- a/pkg/raptorq/rq_server_client.go +++ b/pkg/raptorq/rq_server_client.go @@ -4,6 +4,8 @@ import ( "time" rq "github.com/LumeraProtocol/supernode/gen/raptorq" + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" ) const ( @@ -12,18 +14,25 @@ const ( ) type raptorQServerClient struct { - config *Config - conn *clientConn - rqService rq.RaptorQClient - semaphore chan struct{} // Semaphore to control concurrency + config *Config + conn *clientConn + rqService rq.RaptorQClient + lumeraClient lumera.Client + store rqstore.Store + semaphore chan struct{} // Semaphore to control concurrency } -func newRaptorQServerClient(conn *clientConn, config *Config) RaptorQ { +func NewRaptorQServerClient(conn *clientConn, + config *Config, + lc lumera.Client, + store rqstore.Store) RaptorQ { return &raptorQServerClient{ - conn: conn, - rqService: rq.NewRaptorQClient(conn), - config: config, - semaphore: make(chan struct{}, concurrency), + conn: conn, + rqService: rq.NewRaptorQClient(conn), + lumeraClient: lc, + store: store, + config: config, + semaphore: make(chan struct{}, concurrency), } } diff --git a/pkg/raptorq/valdate_rqids.go b/pkg/raptorq/valdate_rqids.go new file mode 100644 index 00000000..db50f870 --- /dev/null +++ b/pkg/raptorq/valdate_rqids.go @@ -0,0 +1,45 @@ +package raptorq + +import ( + "strings" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/utils" + json "github.com/json-iterator/go" +) + +func ValidateRQIDs(lc lumera.Client, 
signedData string, encRQIDsFileBySN []byte, symbolIdentifiers []string, creatorAddress string) (creatorSignature []byte, err error) {
+	parts := strings.SplitN(signedData, ".", 2) // b64Encode(encodedData).Signature
+	if len(parts) != 2 {
+		return nil, errors.Errorf("invalid signed data: expected <encoded-data>.<signature>")
+	}
+
+	// received from the action details
+	encodedRqids := parts[0]
+	creatorSignature = []byte(parts[1])
+
+	// Decode the base64 encoded rqids file received in the action request
+	rqidsFileBytesByCreator, err := utils.B64Decode([]byte(encodedRqids))
+	if err != nil {
+		return nil, errors.Errorf("decode rqids file: %w", err)
+	}
+
+	// Verify signature against the encodedRQIDFile generated by the supernode
+	err = lc.Node().Verify(creatorAddress, encRQIDsFileBySN, creatorSignature)
+	if err != nil {
+		return nil, errors.Errorf("verify creator signature: %w", err)
+	}
+
+	var actionInputRqIDsRawFile RawSymbolIDFile
+	err = json.Unmarshal(rqidsFileBytesByCreator, &actionInputRqIDsRawFile)
+	if err != nil {
+		return nil, errors.Errorf("unmarshal rqids file: %w", err)
+	}
+
+	if err := utils.EqualStrList(symbolIdentifiers, actionInputRqIDsRawFile.SymbolIdentifiers); err != nil {
+		return nil, errors.Errorf("mismatched rq-ids: %w", err)
+	}
+
+	return creatorSignature, nil
+}
diff --git a/pkg/storage/file_storage_interface.go b/pkg/storage/file_storage_interface.go
new file mode 100644
index 00000000..faa1b0ca
--- /dev/null
+++ b/pkg/storage/file_storage_interface.go
@@ -0,0 +1,45 @@
+//go:generate mockery --name=FileStorageInterface
+//go:generate mockery --name=FileInterface
+
+package storage
+
+import (
+	"io"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+)
+
+var (
+	// ErrFileNotFound is returned when file isn't found.
+	ErrFileNotFound = errors.New("file not found")
+	// ErrFileExists is returned when file already exists.
+	ErrFileExists = errors.New("file exists")
+)
+
+// FileStorageInterface represents a file storage.
+type FileStorageInterface interface { + // Open opens a file and returns file descriptor. + // If name is not found, ErrFileNotFound is returned. + Open(name string) (file FileInterface, err error) + + // Create creates a new file with the given name and returns file descriptor. + Create(name string) (file FileInterface, err error) + + // Remove removes a file by the given name. + Remove(name string) error + + // Rename renames oldname to newname. + Rename(oldname, newname string) error +} + +// FileInterface represents a file. +type FileInterface interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string +} diff --git a/pkg/storage/files/file.go b/pkg/storage/files/file.go new file mode 100644 index 00000000..d304abe5 --- /dev/null +++ b/pkg/storage/files/file.go @@ -0,0 +1,382 @@ +package files + +import ( + "bytes" + "fmt" + "image" + "image/gif" + "image/jpeg" + "image/png" + "io" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage" + + "github.com/disintegration/imaging" + "github.com/kolesa-team/go-webp/decoder" + "github.com/kolesa-team/go-webp/encoder" + "github.com/kolesa-team/go-webp/webp" +) + +// File represents a file. +type File struct { + fmt.Stringer + sync.Mutex + + storage.FileInterface + storage *Storage + + // if a file was created during the process, it should be deleted at the end. + isCreated bool + + // unique name within the storage. + name string + + // file format, png, jpg, etc. + format Format +} + +// Name returns filename. +func (file *File) Name() string { + return file.name +} + +func (file *File) String() string { + return file.name +} + +// SetFormatFromExtension parses and sets image format from filename extension: +// "jpg" (or "jpeg"), "png", "gif" are supported. 
+func (file *File) SetFormatFromExtension(ext string) error { + if format, ok := formatExts[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok { + return file.SetFormat(format) + } + return ErrUnsupportedFormat +} + +// SetFormat sets file extension. +func (file *File) SetFormat(format Format) error { + file.format = format + + newname := fmt.Sprintf("%s.%s", strings.TrimSuffix(file.name, filepath.Ext(file.name)), format) + oldname := file.name + file.name = newname + + if err := file.storage.Update(oldname, newname, file); err != nil { + return err + } + + if !file.isCreated { + return nil + } + return file.storage.Rename(oldname, newname) +} + +// Format returns file extension. +func (file *File) Format() Format { + return file.format +} + +// Open opens a file and returns file descriptor. +// If file is not found, storage.ErrFileNotFound is returned. +func (file *File) Open() (storage.FileInterface, error) { + file.Lock() + defer file.Unlock() + + return file.storage.Open(file.Name()) +} + +// Create creates a file and returns file descriptor. +func (file *File) Create() (storage.FileInterface, error) { + file.Lock() + defer file.Unlock() + + fl, err := file.storage.Create(file.name) + if err != nil { + return nil, err + } + + file.isCreated = true + return fl, nil +} + +// Remove removes the file. +func (file *File) Remove() error { + file.Lock() + defer file.Unlock() + + delete(file.storage.filesMap, file.name) + + if !file.isCreated { + return nil + } + file.isCreated = false + + return file.storage.Remove(file.name) +} + +// Copy creates a copy of the current file. 
+func (file *File) Copy() (*File, error) { + src, err := file.Open() + if err != nil { + return nil, err + } + defer src.Close() + + newFile := file.storage.NewFile() + if err := newFile.SetFormat(file.format); err != nil { + return nil, err + } + + dst, err := newFile.Create() + if err != nil { + return nil, err + } + defer dst.Close() + + if _, err := io.Copy(dst, src); err != nil { + return nil, errors.Errorf("copy file: %w", err) + } + return newFile, nil +} + +// Bytes returns the contents of the file by bytes. +func (file *File) Bytes() ([]byte, error) { + f, err := file.Open() + if err != nil { + return nil, err + } + defer f.Close() + + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(f); err != nil { + return nil, errors.Errorf("read file: %w", err) + } + + return buf.Bytes(), nil +} + +// Write writes data to the file. +func (file *File) Write(data []byte) (n int, err error) { + f, err := file.Create() + if err != nil { + return 0, errors.Errorf("create file: %w", err) + } + defer f.Close() + + n, err = f.Write(data) + if err != nil { + return n, errors.Errorf("write file: %w", err) + } + + return +} + +// ResizeImage resizes image. +func (file *File) ResizeImage(width, height int) error { + src, err := file.LoadImage() + if err != nil { + return err + } + + dst := imaging.Resize(src, width, height, imaging.Lanczos) + + return file.SaveImage(dst) +} + +// RemoveAfter removes the file after the specified duration. +func (file *File) RemoveAfter(d time.Duration) { + go func() { + time.AfterFunc(d, func() { file.Remove() }) + }() +} + +// LoadImage opens images from the file. 
+func (file *File) LoadImage() (image.Image, error) { + f, err := file.Open() + if err != nil { + return nil, err + } + defer f.Close() + + img, _, err := image.Decode(f) + if err != nil { + // Reset the reader to the beginning of the file + _, errSeek := f.Seek(0, io.SeekStart) + if errSeek != nil { + return nil, errors.Errorf("reset file reader: %w", errSeek) + } + + var errWebp error + img, errWebp = webp.Decode(f, &decoder.Options{}) + if errWebp != nil { + return nil, errors.Errorf("decode image(%s) - %w - tried webp as well: %w", f.Name(), err, errWebp) + } + } + + return img, nil +} + +// SaveImage saves image to the file. +func (file *File) SaveImage(img image.Image) error { + f, err := file.Create() + if err != nil { + return err + } + defer f.Close() + + switch file.format { + case JPEG: + if nrgba, ok := img.(*image.NRGBA); ok && nrgba.Opaque() { + rgba := &image.RGBA{ + Pix: nrgba.Pix, + Stride: nrgba.Stride, + Rect: nrgba.Rect, + } + if err := jpeg.Encode(f, rgba, nil); err != nil { + return errors.Errorf("encode jpeg rgba(%s): %w", f.Name(), err) + } + return nil + } + if err := jpeg.Encode(f, img, nil); err != nil { + return errors.Errorf("encode jpeg(%s): %w", f.Name(), err) + } + return nil + + case PNG: + encoder := png.Encoder{CompressionLevel: png.DefaultCompression} + if err := encoder.Encode(f, img); err != nil { + return errors.Errorf("encode png(%s): %w", f.Name(), err) + } + return nil + + case GIF: + if err := gif.Encode(f, img, nil); err != nil { + return errors.Errorf("encode gif(%s): %w", f.Name(), err) + } + return nil + case WEBP: + opts, err := encoder.NewLosslessEncoderOptions(encoder.PresetDefault, 0) + if err != nil { + return errors.Errorf("create lossless encoder option %w", err) + } + if err := webp.Encode(f, img, opts); err != nil { + return errors.Errorf("encode webp(%s): %w", f.Name(), err) + } + return nil + + } + + return ErrUnsupportedFormat +} + +// Thumbnail creates a thumbnail file from the NFT file and store in to 
storage layer +func (file *File) Thumbnail(coordinate ThumbnailCoordinate) (*File, error) { + f := NewFile(file.storage, "thumbnail-of-"+file.name) + if f == nil { + return nil, errors.Errorf("create new file for thumbnail-of-%q", file.Name()) + } + if err := f.SetFormat(file.Format()); err != nil { + return nil, errors.Errorf("set format for thumbnail-of-%q", file.Name()) + } + + img, err := file.LoadImage() + if err != nil { + return nil, errors.Errorf("load image from file(%s): %w", file.Name(), err) + } + + rect := image.Rect(int(coordinate.TopLeftX), int(coordinate.TopLeftY), int(coordinate.BottomRightX), int(coordinate.BottomRightY)) + thumbnail := imaging.Crop(img, rect) + if thumbnail == nil { + return nil, errors.Errorf("generate thumbnail(%s): %w", file.Name(), err) + } + + if err := f.SaveImage(thumbnail); err != nil { + return nil, errors.Errorf("save thumbnail(%s): %w", file.Name(), err) + } + + return f, nil +} + +// UpdateFormat updates file format +func (file *File) UpdateFormat() error { + f, err := file.Open() + if err != nil { + return err + } + defer f.Close() + + // Try decoding with the standard library first + _, format, err := image.Decode(f) + if err != nil { + // If standard decoding fails, reset the reader and try WebP decoding + _, errSeek := f.Seek(0, io.SeekStart) + if errSeek != nil { + return errors.Errorf("reset file reader: %w", errSeek) + } + + _, errWebp := webp.Decode(f, &decoder.Options{}) + if errWebp != nil { + return errors.Errorf("decode image(%s) in updateFormat - tried webp as well: %w", f.Name(), errWebp) + } + format = "webp" + } + + err = file.SetFormatFromExtension(format) + if err != nil { + log.WithError(err).Error(fmt.Sprintf("not able to set extension:%s", err.Error())) + return errors.Errorf("set file format(%s): %w", file.Name(), err) + } + + return nil +} + +// Encoder represents an image encoder. 
+type Encoder interface {
+	Encode(img image.Image) (image.Image, error)
+}
+
+// Encode encodes the image by the given encoder.
+func (file *File) Encode(enc Encoder) error {
+	img, err := file.LoadImage()
+	if err != nil {
+		return fmt.Errorf("load image: %w", err)
+	}
+
+	encImg, err := enc.Encode(img)
+	if err != nil {
+		return fmt.Errorf("common encode image: %w", err)
+	}
+	return file.SaveImage(encImg)
+}
+
+// Decoder represents an image decoder.
+type Decoder interface {
+	Decode(img image.Image) error
+}
+
+// Decode decodes the image by the given decoder.
+func (file *File) Decode(dec Decoder) error {
+	img, err := file.LoadImage()
+	if err != nil {
+		return err
+	}
+	if err := dec.Decode(img); err != nil {
+		return fmt.Errorf("common decode image: %w", err)
+	}
+
+	return nil
+}
+
+// NewFile returns a new File instance.
+func NewFile(storage *Storage, name string) *File {
+	return &File{
+		storage: storage,
+		name:    name,
+	}
+}
diff --git a/pkg/storage/files/format.go b/pkg/storage/files/format.go
new file mode 100644
index 00000000..5a54fc23
--- /dev/null
+++ b/pkg/storage/files/format.go
@@ -0,0 +1,38 @@
+package files
+
+import (
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+)
+
+// ErrUnsupportedFormat means the given image format is not supported.
+var ErrUnsupportedFormat = errors.New("imaging: unsupported image format")
+
+// Image file formats.
+const (
+	JPEG Format = iota
+	PNG
+	GIF
+	WEBP
+)
+
+var formatExts = map[string]Format{
+	"jpg":  JPEG,
+	"jpeg": JPEG,
+	"png":  PNG,
+	"gif":  GIF,
+	"webp": WEBP,
+}
+
+var formatNames = map[Format]string{
+	JPEG: "jpeg",
+	PNG:  "png",
+	GIF:  "gif",
+	WEBP: "webp",
+}
+
+// Format is an image file format.
+type Format int + +func (f Format) String() string { + return formatNames[f] +} diff --git a/pkg/storage/files/storage.go b/pkg/storage/files/storage.go new file mode 100644 index 00000000..9ce1d4e7 --- /dev/null +++ b/pkg/storage/files/storage.go @@ -0,0 +1,82 @@ +package files + +import ( + "context" + "fmt" + "sync/atomic" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/random" + "github.com/LumeraProtocol/supernode/pkg/storage" +) + +// Storage represents a file storage. +type Storage struct { + storage.FileStorageInterface + + idCounter int64 + prefix string + filesMap map[string]*File +} + +// Run removes all files when the context is canceled. +func (storage *Storage) Run(ctx context.Context) error { + <-ctx.Done() + + var errs error + for _, file := range storage.filesMap { + if err := file.Remove(); err != nil { + errs = errors.Append(errs, err) + } + } + + return errs +} + +// NewFile returns a new File instance with a unique name. +func (storage *Storage) NewFile() *File { + id := atomic.AddInt64(&storage.idCounter, 1) + name := fmt.Sprintf("%s-%d", storage.prefix, id) + + file := NewFile(storage, name) + storage.filesMap[name] = file + + return file +} + +// File returns File by the given name. +func (storage *Storage) File(name string) (*File, error) { + file, ok := storage.filesMap[name] + if !ok { + return nil, errors.New("image not found") + } + return file, nil +} + +// Update changes the key to identify a *File to a new key +func (storage *Storage) Update(oldname, newname string, file *File) error { + f, ok := storage.filesMap[oldname] + if !ok { + return errors.New("file not found") + } + + if f != file { + return errors.New("not the same file") + } + + delete(storage.filesMap, oldname) + storage.filesMap[newname] = file + return nil +} + +// NewStorage returns a new Storage instance. 
+func NewStorage(storage storage.FileStorageInterface) *Storage { + prefix, _ := random.String(8, random.Base62Chars) + + return &Storage{ + FileStorageInterface: storage, + + prefix: prefix, + filesMap: make(map[string]*File), + } +} diff --git a/pkg/storage/files/storage_test.go b/pkg/storage/files/storage_test.go new file mode 100644 index 00000000..43087f95 --- /dev/null +++ b/pkg/storage/files/storage_test.go @@ -0,0 +1,37 @@ +package files + +import ( + "os" + "path/filepath" + "testing" + + "github.com/LumeraProtocol/supernode/pkg/storage/fs" + "github.com/stretchr/testify/assert" +) + +func Test_StoreFileAfterSetFormat(t *testing.T) { + storage := NewStorage(fs.NewFileStorage(os.TempDir())) + + files := []struct { + name string + format Format + }{ + {"test.jpeg", JPEG}, + {"test.jpg", JPEG}, + {"test.png", PNG}, + {"test.webp", WEBP}, + } + + for _, file := range files { + f := storage.NewFile() + assert.NotNil(t, f) + + // + err := f.SetFormatFromExtension(filepath.Ext(file.name)) + assert.Equal(t, nil, err) + assert.Equal(t, file.format, f.format) + + _, err = storage.File(f.Name()) + assert.Equal(t, nil, err) + } +} diff --git a/pkg/storage/files/thumbnail.go b/pkg/storage/files/thumbnail.go new file mode 100644 index 00000000..b747c701 --- /dev/null +++ b/pkg/storage/files/thumbnail.go @@ -0,0 +1,9 @@ +package files + +// ThumbnailCoordinate contains coordinate of region crop by user +type ThumbnailCoordinate struct { + TopLeftX int64 `json:"top_left_x"` + TopLeftY int64 `json:"top_left_y"` + BottomRightX int64 `json:"bottom_right_x"` + BottomRightY int64 `json:"bottom_right_y"` +} diff --git a/pkg/storage/fs/file.go b/pkg/storage/fs/file.go new file mode 100644 index 00000000..fe9597b8 --- /dev/null +++ b/pkg/storage/fs/file.go @@ -0,0 +1,87 @@ +package fs + +import ( + "os" + "path/filepath" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage" +) + 
+const ( + logPrefix = "storage-fs" +) + +// FS represents file system storage. +type FS struct { + dir string +} + +// Open implements storage.FileStorageInterface.Open +func (fs *FS) Open(filename string) (storage.FileInterface, error) { + filename = filepath.Join(fs.dir, filename) + + if _, err := os.Stat(filename); os.IsNotExist(err) { + return nil, storage.ErrFileNotFound + } + + file, err := os.Open(filename) + if err != nil { + return nil, errors.Errorf("open file %q: %w", filename, err) + } + return file, nil +} + +// Create implements storage.FileStorageInterface.Create +func (fs *FS) Create(filename string) (storage.FileInterface, error) { + filename = filepath.Join(fs.dir, filename) + + if _, err := os.Stat(filename); !os.IsNotExist(err) { + log.WithPrefix(logPrefix).Debugf("Rewrite file %q", filename) + } else { + log.WithPrefix(logPrefix).Debugf("Create file %q", filename) + } + + file, err := os.Create(filename) + if err != nil { + return nil, errors.Errorf("create file %q: %w", filename, err) + } + return file, nil +} + +// Remove implements storage.FileStorageInterface.Remove +func (fs *FS) Remove(filename string) error { + filename = filepath.Join(fs.dir, filename) + + log.WithPrefix(logPrefix).Debugf("Remove file %q", filename) + + if err := os.Remove(filename); err != nil { + return errors.Errorf("remove file %q: %w", filename, err) + } + return nil +} + +// Rename renames oldName to newName. +func (fs *FS) Rename(oldname, newname string) error { + if oldname == newname { + return nil + } + + oldname = filepath.Join(fs.dir, oldname) + newname = filepath.Join(fs.dir, newname) + + log.WithPrefix(logPrefix).Debugf("Rename file %q to %q", oldname, newname) + + if err := os.Rename(oldname, newname); err != nil { + return errors.Errorf("rename file %q to %q: %w", oldname, newname, err) + } + return nil +} + +// NewFileStorage returns new FS instance. Where `dir` is the path for storing files. 
+func NewFileStorage(dir string) storage.FileStorageInterface { + return &FS{ + dir: dir, + } +} diff --git a/pkg/storage/fs/file_test.go b/pkg/storage/fs/file_test.go new file mode 100644 index 00000000..955e590f --- /dev/null +++ b/pkg/storage/fs/file_test.go @@ -0,0 +1,168 @@ +package fs + +import ( + "fmt" + "os" + "testing" + + "github.com/LumeraProtocol/supernode/pkg/storage" + + "github.com/stretchr/testify/assert" +) + +func TestFSOpen(t *testing.T) { + t.Parallel() + + type args struct { + filename string + } + + type handleFunc func(dir string, t assert.TestingT) + + testCases := []struct { + args args + createfunc handleFunc + assertion assert.ErrorAssertionFunc + valueAssert assert.ValueAssertionFunc + }{ + { + args: args{"test.txt"}, + assertion: assert.NoError, + valueAssert: assert.NotNil, + createfunc: func(dir string, t assert.TestingT) { + fs := &FS{ + dir: dir, + } + + _, err := fs.Create("test.txt") + assert.NoError(t, err) + }, + }, { + args: args{"non-exit.txt"}, + assertion: assert.Error, + valueAssert: assert.Nil, + createfunc: func(dir string, t assert.TestingT) {}, + }, + } + + for i, testCase := range testCases { + testCase := testCase + + t.Run("group", func(t *testing.T) { + dir, _ := os.MkdirTemp("", "*") + defer os.RemoveAll(dir) + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + testCase.createfunc(dir, t) + fs := &FS{dir: dir} + + got, err := fs.Open(testCase.args.filename) + testCase.assertion(t, err) + testCase.valueAssert(t, got) + }) + }) + + } +} + +func TestFSCreate(t *testing.T) { + t.Parallel() + + type args struct { + filename string + } + + testCases := []struct { + args args + assertion assert.ErrorAssertionFunc + }{ + { + args: args{"test-1.txt"}, + assertion: assert.NoError, + }, + } + + t.Run("group", func(t *testing.T) { + dir, _ := os.MkdirTemp("", "*") + defer os.RemoveAll(dir) + + for i, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + 
fs := &FS{ + dir: dir, + } + got, err := fs.Create(testCase.args.filename) + testCase.assertion(t, err) + assert.NotNil(t, got) + assert.FileExists(t, fmt.Sprintf("%s/%s", dir, testCase.args.filename)) + }) + } + }) +} + +func TestFSRemove(t *testing.T) { + t.Parallel() + + type args struct { + filename string + } + + testCases := []struct { + args args + assertion assert.ErrorAssertionFunc + }{ + { + args: args{"test-2.txt"}, + assertion: assert.NoError, + }, + } + + t.Run("group", func(t *testing.T) { + dir, _ := os.MkdirTemp("", "*") + defer os.RemoveAll(dir) + + for i, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + fs := &FS{ + dir: dir, + } + + _, err := fs.Create(testCase.args.filename) + assert.NoError(t, err) + + testCase.assertion(t, fs.Remove(testCase.args.filename)) + }) + } + + }) +} + +func TestNewFileStorage(t *testing.T) { + t.Parallel() + + type args struct { + dir string + } + + testCases := []struct { + args args + want storage.FileStorageInterface + }{ + { + args: args{"./"}, + want: &FS{dir: "./"}, + }, + } + + for i, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + assert.Equal(t, testCase.want, NewFileStorage(testCase.args.dir)) + }) + } +} diff --git a/pkg/storage/queries/health_check.go b/pkg/storage/queries/health_check.go new file mode 100644 index 00000000..c47db8cf --- /dev/null +++ b/pkg/storage/queries/health_check.go @@ -0,0 +1,430 @@ +package queries + +import ( + "context" + "fmt" + "time" + + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/types" + "github.com/LumeraProtocol/supernode/pkg/utils/metrics" + json "github.com/json-iterator/go" +) + +type HealthCheckChallengeQueries interface { + InsertHealthCheckChallengeMessage(challenge types.HealthCheckChallengeLogMessage) error + InsertBroadcastHealthCheckMessage(challenge 
types.BroadcastHealthCheckLogMessage) error + QueryHCChallengeMessage(challengeID string, messageType int) (challengeMessage types.HealthCheckChallengeLogMessage, err error) + GetHealthCheckChallengeMetricsByChallengeID(challengeID string) ([]types.HealthCheckChallengeLogMessage, error) + + GetHCMetricsByChallengeIDAndMessageType(challengeID string, messageType types.HealthCheckMessageType) ([]types.HealthCheckChallengeLogMessage, error) + BatchInsertHCMetrics(metrics []types.HealthCheckChallengeLogMessage) error + HealthCheckChallengeMetrics(timestamp time.Time) ([]types.HealthCheckChallengeLogMessage, error) + InsertHealthCheckChallengeMetric(metric types.HealthCheckChallengeMetric) error + GetHCSummaryStats(from time.Time) (hcMetrics metrics.HCMetrics, err error) + GetTotalHCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.HCMetrics, error) + GetMetricsDataByHealthCheckChallengeID(ctx context.Context, challengeID string) ([]types.HealthCheckMessage, error) + GetLastNHCMetrics() ([]types.NHcMetric, error) + + GetDistinctHCChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) + GetDistinctHCChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) +} + +// GetTotalHCGeneratedAndProcessedAndEvaluated retrieves the total health-check challenges generated/processed/evaluated +func (s *SQLiteStore) GetTotalHCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.HCMetrics, error) { + metrics := metrics.HCMetrics{} + + // Query for total number of challenges + totalChallengeQuery := "SELECT COUNT(DISTINCT challenge_id) FROM healthcheck_challenge_metrics WHERE message_type = 1 AND created_at > ?" + err := s.db.QueryRow(totalChallengeQuery, from).Scan(&metrics.TotalChallenges) + if err != nil { + return metrics, err + } + + // Query for total challenges responded + totalChallengesProcessedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM healthcheck_challenge_metrics WHERE message_type = 2 AND created_at > ?" 
+ err = s.db.QueryRow(totalChallengesProcessedQuery, from).Scan(&metrics.TotalChallengesProcessed) + if err != nil { + return metrics, err + } + + totalChallengesEvaluatedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM healthcheck_challenge_metrics WHERE message_type = 3 AND created_at > ?" + err = s.db.QueryRow(totalChallengesEvaluatedQuery, from).Scan(&metrics.TotalChallengesEvaluatedByChallenger) + if err != nil { + return metrics, err + } + + return metrics, nil +} + +// GetHCObserversEvaluations retrieves the observer's evaluations +func (s *SQLiteStore) GetHCObserversEvaluations(from time.Time) ([]types.HealthCheckChallengeLogMessage, error) { + var messages []types.HealthCheckChallengeLogMessage + + query := "SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at FROM healthcheck_challenge_metrics WHERE message_type = 4 and created_at > ?" + rows, err := s.db.Query(query, from) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var msg types.HealthCheckChallengeLogMessage + err := rows.Scan(&msg.ID, &msg.ChallengeID, &msg.MessageType, &msg.Data, &msg.Sender, &msg.CreatedAt, &msg.UpdatedAt) + if err != nil { + return nil, err + } + messages = append(messages, msg) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return messages, nil +} + +// GetHCSummaryStats get health-check summary stats +func (s *SQLiteStore) GetHCSummaryStats(from time.Time) (hcMetrics metrics.HCMetrics, err error) { + hcStats := metrics.HCMetrics{} + hcMetrics, err = s.GetTotalHCGeneratedAndProcessedAndEvaluated(from) + if err != nil { + return hcMetrics, err + } + hcStats.TotalChallenges = hcMetrics.TotalChallenges + hcStats.TotalChallengesProcessed = hcMetrics.TotalChallengesProcessed + hcStats.TotalChallengesEvaluatedByChallenger = hcMetrics.TotalChallengesEvaluatedByChallenger + + hcObserversEvaluations, err := s.GetHCObserversEvaluations(from) + if err != nil { + return hcMetrics, err + } + 
log.WithField("observer_evaluations", len(hcObserversEvaluations)).Info("observer evaluations retrieved") + + observerEvaluationMetrics := processHCObserverEvaluations(hcObserversEvaluations) + log.WithField("observer_evaluation_metrics", len(observerEvaluationMetrics)).Info("observer evaluation metrics retrieved") + + for _, obMetrics := range observerEvaluationMetrics { + if obMetrics.ChallengesVerified >= 3 { + hcMetrics.TotalChallengesVerified++ + } else { + if obMetrics.FailedByInvalidTimestamps > 0 { + hcMetrics.SlowResponsesObservedByObservers++ + } + if obMetrics.FailedByInvalidSignatures > 0 { + hcMetrics.InvalidSignaturesObservedByObservers++ + } + if obMetrics.FailedByInvalidEvaluation > 0 { + hcMetrics.InvalidEvaluationObservedByObservers++ + } + } + } + + return hcMetrics, nil +} + +// GetHealthCheckChallengeMetricsByChallengeID gets the health-check challenge by ID +func (s *SQLiteStore) GetHealthCheckChallengeMetricsByChallengeID(challengeID string) ([]types.HealthCheckChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM healthcheck_challenge_metrics + WHERE challenge_id = ?;` + + rows, err := s.db.Query(query, challengeID) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.HealthCheckChallengeLogMessage + for rows.Next() { + var m types.HealthCheckChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetMetricsDataByHealthCheckChallengeID gets the metrics data by health-check challenge id +func (s *SQLiteStore) GetMetricsDataByHealthCheckChallengeID(ctx context.Context, challengeID string) (healthCheckChallengeMessages []types.HealthCheckMessage, err error) { + hcMetrics, err := s.GetHealthCheckChallengeMetricsByChallengeID(challengeID) + if err != 
nil { + return healthCheckChallengeMessages, err + } + log.WithContext(ctx).WithField("rows", len(hcMetrics)).Info("health-check-challenge metrics row count") + + for _, hcMetric := range hcMetrics { + msg := types.HealthCheckMessageData{} + if err := json.Unmarshal(hcMetric.Data, &msg); err != nil { + return healthCheckChallengeMessages, fmt.Errorf("cannot unmarshal health check challenge data: %w", err) + } + + healthCheckChallengeMessages = append(healthCheckChallengeMessages, types.HealthCheckMessage{ + ChallengeID: hcMetric.ChallengeID, + MessageType: types.HealthCheckMessageType(hcMetric.MessageType), + Sender: hcMetric.Sender, + SenderSignature: hcMetric.SenderSignature, + Data: msg, + }) + } + + return healthCheckChallengeMessages, nil +} + +// InsertHealthCheckChallengeMessage inserts failed healthcheck challenge to db +func (s *SQLiteStore) InsertHealthCheckChallengeMessage(challenge types.HealthCheckChallengeLogMessage) error { + now := time.Now().UTC() + const insertQuery = "INSERT INTO healthcheck_challenge_messages(id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?);" + _, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.MessageType, challenge.Data, challenge.Sender, challenge.SenderSignature, now, now) + + if err != nil { + return err + } + + return nil +} + +// InsertHealthCheckChallengeMetric inserts the health-check challenge metrics +func (s *SQLiteStore) InsertHealthCheckChallengeMetric(m types.HealthCheckChallengeMetric) error { + now := time.Now().UTC() + + const metricsQuery = "INSERT INTO healthcheck_challenge_metrics(id, challenge_id, message_type, data, sender_id, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?) 
ON CONFLICT DO NOTHING;" + _, err := s.db.Exec(metricsQuery, m.ChallengeID, m.MessageType, m.Data, m.SenderID, now, now) + if err != nil { + return err + } + + return nil +} + +// BatchInsertHCMetrics inserts the health-check challenges in a batch +func (s *SQLiteStore) BatchInsertHCMetrics(metrics []types.HealthCheckChallengeLogMessage) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO healthcheck_challenge_metrics + (id, challenge_id, message_type, data, sender_id, created_at, updated_at) + VALUES (NULL,?,?,?,?,?,?) + `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + for _, metric := range metrics { + now := time.Now().UTC() + + _, err = stmt.Exec(metric.ChallengeID, metric.MessageType, metric.Data, metric.Sender, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + // Commit the transaction + return tx.Commit() +} + +// HealthCheckChallengeMetrics retrieves all the metrics needs to be broadcast +func (s *SQLiteStore) HealthCheckChallengeMetrics(timestamp time.Time) ([]types.HealthCheckChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM healthcheck_challenge_metrics + WHERE created_at > ? 
+ ` + + rows, err := s.db.Query(query, timestamp) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.HealthCheckChallengeLogMessage + for rows.Next() { + var m types.HealthCheckChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// InsertBroadcastHealthCheckMessage inserts healthcheck healthcheck challenge msg to db +func (s *SQLiteStore) InsertBroadcastHealthCheckMessage(challenge types.BroadcastHealthCheckLogMessage) error { + now := time.Now().UTC() + const insertQuery = "INSERT INTO broadcast_healthcheck_challenge_messages(id, challenge_id, data, challenger, recipient, observers, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?);" + _, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.Data, challenge.Challenger, challenge.Recipient, challenge.Observers, now, now) + if err != nil { + return err + } + + return nil +} + +// QueryHCChallengeMessage retrieves healthcheck challenge message against challengeID and messageType +func (s *SQLiteStore) QueryHCChallengeMessage(challengeID string, messageType int) (challengeMessage types.HealthCheckChallengeLogMessage, err error) { + const selectQuery = "SELECT * FROM healthcheck_challenge_messages WHERE challenge_id=? AND message_type=?" 
+ err = s.db.QueryRow(selectQuery, challengeID, messageType).Scan( + &challengeMessage.ID, &challengeMessage.ChallengeID, &challengeMessage.MessageType, &challengeMessage.Data, + &challengeMessage.Sender, &challengeMessage.SenderSignature, &challengeMessage.CreatedAt, &challengeMessage.UpdatedAt) + + if err != nil { + return challengeMessage, err + } + + return challengeMessage, nil +} + +// GetHCMetricsByChallengeIDAndMessageType retrieves all the metrics by challengeID and messageType +func (s *SQLiteStore) GetHCMetricsByChallengeIDAndMessageType(challengeID string, messageType types.HealthCheckMessageType) ([]types.HealthCheckChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM healthcheck_challenge_metrics + WHERE challenge_id = ? + AND message_type = ?;` + + rows, err := s.db.Query(query, challengeID, int(messageType)) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.HealthCheckChallengeLogMessage + for rows.Next() { + var m types.HealthCheckChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +func processHCObserverEvaluations(observersEvaluations []types.HealthCheckChallengeLogMessage) map[string]HCObserverEvaluationMetrics { + evaluationMap := make(map[string]HCObserverEvaluationMetrics) + + for _, observerEvaluation := range observersEvaluations { + var oe types.HealthCheckMessageData + if err := json.Unmarshal(observerEvaluation.Data, &oe); err != nil { + continue + } + + oem, exists := evaluationMap[observerEvaluation.ChallengeID] + if !exists { + oem = HCObserverEvaluationMetrics{} // Initialize if not exists + } + + if isHCObserverEvaluationVerified(oe.ObserverEvaluation) { + oem.ChallengesVerified++ + } else { + if 
!oe.ObserverEvaluation.IsChallengeTimestampOK || + !oe.ObserverEvaluation.IsProcessTimestampOK || + !oe.ObserverEvaluation.IsEvaluationTimestampOK { + oem.FailedByInvalidTimestamps++ + } + + if !oe.ObserverEvaluation.IsChallengerSignatureOK || + !oe.ObserverEvaluation.IsRecipientSignatureOK { + oem.FailedByInvalidSignatures++ + } + + if !oe.ObserverEvaluation.IsEvaluationResultOK { + oem.FailedByInvalidEvaluation++ + } + } + + evaluationMap[observerEvaluation.ChallengeID] = oem + } + + return evaluationMap +} + +func isHCObserverEvaluationVerified(observerEvaluation types.HealthCheckObserverEvaluationData) bool { + if !observerEvaluation.IsEvaluationResultOK { + return false + } + + if !observerEvaluation.IsChallengerSignatureOK { + return false + } + + if !observerEvaluation.IsRecipientSignatureOK { + return false + } + + if !observerEvaluation.IsChallengeTimestampOK { + return false + } + + if !observerEvaluation.IsProcessTimestampOK { + return false + } + + if !observerEvaluation.IsEvaluationTimestampOK { + return false + } + + return true +} + +// GetDistinctHCChallengeIDsCountForScoreAggregation gets the count of distinct challenge ids for score aggregation +func (s *SQLiteStore) GetDistinctHCChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) { + query := ` + SELECT COUNT(DISTINCT challenge_id) + FROM healthcheck_challenge_metrics + WHERE message_type = 4 AND created_at >= ? AND created_at < ? 
+ ` + + var challengeIDsCount int + err := s.db.QueryRow(query, after, before).Scan(&challengeIDsCount) + if err != nil { + return 0, err + } + + return challengeIDsCount, nil +} + +// GetDistinctHCChallengeIDs retrieves the distinct challenge ids for score aggregation +func (s *SQLiteStore) GetDistinctHCChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) { + offset := batchNumber * batchSizeForChallengeIDsRetrieval + + query := ` + SELECT DISTINCT challenge_id + FROM healthcheck_challenge_metrics + WHERE message_type = 4 AND created_at >= ? AND created_at < ? + LIMIT ? OFFSET ? + ` + + rows, err := s.db.Query(query, after, before, batchSizeForChallengeIDsRetrieval, offset) + if err != nil { + return nil, err + } + defer rows.Close() + + var challengeIDs []string + for rows.Next() { + var challengeID string + if err := rows.Scan(&challengeID); err != nil { + return nil, err + } + challengeIDs = append(challengeIDs, challengeID) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return challengeIDs, nil +} diff --git a/pkg/storage/queries/local.go b/pkg/storage/queries/local.go new file mode 100644 index 00000000..e677de76 --- /dev/null +++ b/pkg/storage/queries/local.go @@ -0,0 +1,16 @@ +package queries + +import ( + "context" +) + +// LocalStoreInterface is interface for queries sqlite store +type LocalStoreInterface interface { + CloseHistoryDB(ctx context.Context) + + TaskHistoryQueries + SelfHealingQueries + StorageChallengeQueries + PingHistoryQueries + HealthCheckChallengeQueries +} diff --git a/pkg/storage/queries/ping_history.go b/pkg/storage/queries/ping_history.go new file mode 100644 index 00000000..84bfc6e0 --- /dev/null +++ b/pkg/storage/queries/ping_history.go @@ -0,0 +1,294 @@ +package queries + +import ( + "time" + + "github.com/LumeraProtocol/supernode/pkg/types" +) + +type PingHistoryQueries interface { + UpsertPingHistory(pingInfo types.PingInfo) error + GetPingInfoBySupernodeID(supernodeID string) 
(*types.PingInfo, error) + GetAllPingInfos() (types.PingInfos, error) + GetWatchlistPingInfo() ([]types.PingInfo, error) + GetAllPingInfoForOnlineNodes() (types.PingInfos, error) + UpdatePingInfo(supernodeID string, isOnWatchlist, isAdjusted bool) error + + UpdateSCMetricsBroadcastTimestamp(nodeID string, broadcastAt time.Time) error + UpdateMetricsBroadcastTimestamp(nodeID string) error + UpdateGenerationMetricsBroadcastTimestamp(nodeID string) error + UpdateExecutionMetricsBroadcastTimestamp(nodeID string) error + UpdateHCMetricsBroadcastTimestamp(nodeID string, broadcastAt time.Time) error +} + +// UpsertPingHistory inserts/update ping information into the ping_history table +func (s *SQLiteStore) UpsertPingHistory(pingInfo types.PingInfo) error { + now := time.Now().UTC() + + const upsertQuery = ` + INSERT INTO ping_history ( + supernode_id, ip_address, total_pings, total_successful_pings, + avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time, + created_at, updated_at + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ ON CONFLICT(supernode_id) + DO UPDATE SET + total_pings = excluded.total_pings, + total_successful_pings = excluded.total_successful_pings, + avg_ping_response_time = excluded.avg_ping_response_time, + is_online = excluded.is_online, + is_on_watchlist = excluded.is_on_watchlist, + is_adjusted = excluded.is_adjusted, + last_seen = excluded.last_seen, + cumulative_response_time = excluded.cumulative_response_time, + updated_at = excluded.updated_at;` + + _, err := s.db.Exec(upsertQuery, + pingInfo.SupernodeID, pingInfo.IPAddress, pingInfo.TotalPings, + pingInfo.TotalSuccessfulPings, pingInfo.AvgPingResponseTime, + pingInfo.IsOnline, pingInfo.IsOnWatchlist, pingInfo.IsAdjusted, pingInfo.LastSeen.Time, pingInfo.CumulativeResponseTime, now, now) + if err != nil { + return err + } + + return nil +} + +// GetPingInfoBySupernodeID retrieves a ping history record by supernode ID +func (s *SQLiteStore) GetPingInfoBySupernodeID(supernodeID string) (*types.PingInfo, error) { + const selectQuery = ` + SELECT id, supernode_id, ip_address, total_pings, total_successful_pings, + avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time, + created_at, updated_at + FROM ping_history + WHERE supernode_id = ?;` + + var pingInfo types.PingInfo + row := s.db.QueryRow(selectQuery, supernodeID) + + // Scan the row into the PingInfo struct + err := row.Scan( + &pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings, + &pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime, + &pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime, + &pingInfo.CreatedAt, &pingInfo.UpdatedAt, + ) + + if err != nil { + return nil, err + } + + return &pingInfo, nil +} + +// GetWatchlistPingInfo retrieves all the nodes that are on watchlist +func (s *SQLiteStore) GetWatchlistPingInfo() ([]types.PingInfo, error) { + const selectQuery = ` + SELECT id, supernode_id, ip_address, 
total_pings, total_successful_pings, + avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time, + created_at, updated_at + FROM ping_history + WHERE is_on_watchlist = true AND is_adjusted = false;` + + rows, err := s.db.Query(selectQuery) + if err != nil { + return nil, err + } + defer rows.Close() + + var pingInfos types.PingInfos + for rows.Next() { + var pingInfo types.PingInfo + if err := rows.Scan( + &pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings, + &pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime, + &pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime, + &pingInfo.CreatedAt, &pingInfo.UpdatedAt, + ); err != nil { + return nil, err + } + pingInfos = append(pingInfos, pingInfo) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return pingInfos, nil +} + +// UpdatePingInfo updates the ping info +func (s *SQLiteStore) UpdatePingInfo(supernodeID string, isOnWatchlist, isAdjusted bool) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET is_adjusted = ?, is_on_watchlist = ? +WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, isAdjusted, isOnWatchlist, supernodeID) + if err != nil { + return err + } + + return nil +} + +// UpdateMetricsBroadcastTimestamp updates the ping info metrics_last_broadcast_at +func (s *SQLiteStore) UpdateMetricsBroadcastTimestamp(nodeID string) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET metrics_last_broadcast_at = ? 
+WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().UTC(), nodeID) + if err != nil { + return err + } + + return nil +} + +// UpdateGenerationMetricsBroadcastTimestamp updates the ping info generation_metrics_last_broadcast_at +func (s *SQLiteStore) UpdateGenerationMetricsBroadcastTimestamp(nodeID string) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET generation_metrics_last_broadcast_at = ? +WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().Add(-180*time.Minute).UTC(), nodeID) + if err != nil { + return err + } + + return nil +} + +// UpdateExecutionMetricsBroadcastTimestamp updates the ping info execution_metrics_last_broadcast_at +func (s *SQLiteStore) UpdateExecutionMetricsBroadcastTimestamp(nodeID string) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET execution_metrics_last_broadcast_at = ? +WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().Add(-180*time.Minute).UTC(), nodeID) + if err != nil { + return err + } + + return nil +} + +// UpdateSCMetricsBroadcastTimestamp updates the SC metrics last broadcast at timestamp +func (s *SQLiteStore) UpdateSCMetricsBroadcastTimestamp(nodeID string, updatedAt time.Time) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET metrics_last_broadcast_at = ? 
+WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().UTC().Add(-180*time.Minute), nodeID) + if err != nil { + return err + } + + return nil +} + +// GetAllPingInfos retrieves all ping infos +func (s *SQLiteStore) GetAllPingInfos() (types.PingInfos, error) { + const selectQuery = ` + SELECT id, supernode_id, ip_address, total_pings, total_successful_pings, + avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time, + created_at, updated_at + FROM ping_history + ` + rows, err := s.db.Query(selectQuery) + if err != nil { + return nil, err + } + defer rows.Close() + + var pingInfos types.PingInfos + for rows.Next() { + + var pingInfo types.PingInfo + if err := rows.Scan( + &pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings, + &pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime, + &pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime, + &pingInfo.CreatedAt, &pingInfo.UpdatedAt, + ); err != nil { + return nil, err + } + pingInfos = append(pingInfos, pingInfo) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return pingInfos, nil +} + +// GetAllPingInfoForOnlineNodes retrieves all ping infos for nodes that are online +func (s *SQLiteStore) GetAllPingInfoForOnlineNodes() (types.PingInfos, error) { + const selectQuery = ` + SELECT id, supernode_id, ip_address, total_pings, total_successful_pings, + avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time, + metrics_last_broadcast_at, generation_metrics_last_broadcast_at, execution_metrics_last_broadcast_at, + created_at, updated_at + FROM ping_history + WHERE is_online = true` + rows, err := s.db.Query(selectQuery) + if err != nil { + return nil, err + } + defer rows.Close() + + var pingInfos types.PingInfos + for rows.Next() { + + var pingInfo types.PingInfo + if 
err := rows.Scan( + &pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings, + &pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime, + &pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime, + &pingInfo.MetricsLastBroadcastAt, &pingInfo.GenerationMetricsLastBroadcastAt, &pingInfo.ExecutionMetricsLastBroadcastAt, + &pingInfo.CreatedAt, &pingInfo.UpdatedAt, + ); err != nil { + return nil, err + } + pingInfos = append(pingInfos, pingInfo) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return pingInfos, nil +} + +// UpdateHCMetricsBroadcastTimestamp updates health-check challenges last broadcast at +func (s *SQLiteStore) UpdateHCMetricsBroadcastTimestamp(nodeID string, updatedAt time.Time) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET health_check_metrics_last_broadcast_at = ? +WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().UTC().Add(-180*time.Minute), nodeID) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/storage/queries/self_healing.go b/pkg/storage/queries/self_healing.go new file mode 100644 index 00000000..5a4731f3 --- /dev/null +++ b/pkg/storage/queries/self_healing.go @@ -0,0 +1,644 @@ +package queries + +import ( + "context" + "fmt" + "time" + + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/types" + "github.com/LumeraProtocol/supernode/pkg/utils/metrics" + json "github.com/json-iterator/go" +) + +type SelfHealingQueries interface { + BatchInsertSelfHealingChallengeEvents(ctx context.Context, event []types.SelfHealingChallengeEvent) error + UpdateSHChallengeEventProcessed(challengeID string, isProcessed bool) error + GetSelfHealingChallengeEvents() ([]types.SelfHealingChallengeEvent, error) + CleanupSelfHealingChallenges() (err error) + QuerySelfHealingChallenges() (challenges 
[]types.SelfHealingChallenge, err error) + + QueryMetrics(ctx context.Context, from time.Time, to *time.Time) (m metrics.Metrics, err error) + InsertSelfHealingGenerationMetrics(metrics types.SelfHealingGenerationMetric) error + InsertSelfHealingExecutionMetrics(metrics types.SelfHealingExecutionMetric) error + BatchInsertExecutionMetrics(metrics []types.SelfHealingExecutionMetric) error + GetSelfHealingGenerationMetrics(timestamp time.Time) ([]types.SelfHealingGenerationMetric, error) + GetSelfHealingExecutionMetrics(timestamp time.Time) ([]types.SelfHealingExecutionMetric, error) + GetLastNSHChallenges(ctx context.Context, n int) (types.SelfHealingReports, error) + GetSHChallengeReport(ctx context.Context, challengeID string) (types.SelfHealingReports, error) + GetSHExecutionMetrics(ctx context.Context, from time.Time) (metrics.SHExecutionMetrics, error) +} + +var ( + oneYearAgo = time.Now().AddDate(-1, 0, 0) +) + +// SHChallengeMetric represents the self-healing challenge metric +type SHChallengeMetric struct { + ChallengeID string + + // healer node + IsAck bool + IsAccepted bool + IsRejected bool + + // verifier nodes + HasMinVerifications bool + IsVerified bool + IsReconstructionRequiredVerified bool + IsReconstructionNotRequiredVerified bool + IsUnverified bool + IsReconstructionRequiredNotVerified bool + IsReconstructionNotRequiredNotVerified bool + IsReconstructionRequiredHashMismatch bool + + IsHealed bool +} + +type HCObserverEvaluationMetrics struct { + ChallengesVerified int + FailedByInvalidTimestamps int + FailedByInvalidSignatures int + FailedByInvalidEvaluation int +} + +type ObserverEvaluationMetrics struct { + ChallengesVerified int + FailedByInvalidTimestamps int + FailedByInvalidSignatures int + FailedByInvalidEvaluation int +} + +// InsertSelfHealingGenerationMetrics inserts self-healing generation metrics +func (s *SQLiteStore) InsertSelfHealingGenerationMetrics(metrics types.SelfHealingGenerationMetric) error { + now := time.Now().UTC() + 
const insertQuery = "INSERT INTO self_healing_generation_metrics(id, trigger_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING;" + _, err := s.db.Exec(insertQuery, metrics.TriggerID, metrics.MessageType, metrics.Data, metrics.SenderID, metrics.SenderSignature, now, now) + if err != nil { + return err + } + + return nil +} + +// InsertSelfHealingExecutionMetrics inserts self-healing execution metrics +func (s *SQLiteStore) InsertSelfHealingExecutionMetrics(metrics types.SelfHealingExecutionMetric) error { + now := time.Now().UTC() + const insertQuery = "INSERT INTO self_healing_execution_metrics(id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING;" + + _, err := s.db.Exec(insertQuery, metrics.TriggerID, metrics.ChallengeID, metrics.MessageType, metrics.Data, metrics.SenderID, metrics.SenderSignature, now, now) + if err != nil { + return err + } + + return nil +} + +// BatchInsertExecutionMetrics inserts execution metrics in a batch +func (s *SQLiteStore) BatchInsertExecutionMetrics(metrics []types.SelfHealingExecutionMetric) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO self_healing_execution_metrics + (id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) + VALUES (NULL,?,?,?,?,?,?,?,?) 
+ `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + for _, metric := range metrics { + now := time.Now().UTC() + + _, err = stmt.Exec(metric.TriggerID, metric.ChallengeID, metric.MessageType, metric.Data, metric.SenderID, metric.SenderSignature, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + // Commit the transaction + return tx.Commit() +} + +// GetSelfHealingExecutionMetrics retrieves all self_healing_execution_metrics records created after the specified timestamp. +func (s *SQLiteStore) GetSelfHealingExecutionMetrics(timestamp time.Time) ([]types.SelfHealingExecutionMetric, error) { + const query = ` + SELECT id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at + FROM self_healing_execution_metrics + WHERE created_at > ? + ` + + rows, err := s.db.Query(query, timestamp) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.SelfHealingExecutionMetric + for rows.Next() { + var m types.SelfHealingExecutionMetric + if err := rows.Scan(&m.ID, &m.TriggerID, &m.ChallengeID, &m.MessageType, &m.Data, &m.SenderID, &m.SenderSignature, &m.CreatedAt, &m.UpdatedAt); err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetSelfHealingGenerationMetrics retrieves all self_healing_generation_metrics records created after the specified timestamp. +func (s *SQLiteStore) GetSelfHealingGenerationMetrics(timestamp time.Time) ([]types.SelfHealingGenerationMetric, error) { + const query = ` + SELECT id, trigger_id, message_type, data, sender_id, sender_signature, created_at, updated_at + FROM self_healing_generation_metrics + WHERE created_at > ? 
+ ` + + rows, err := s.db.Query(query, timestamp) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.SelfHealingGenerationMetric + for rows.Next() { + var m types.SelfHealingGenerationMetric + if err := rows.Scan(&m.ID, &m.TriggerID, &m.MessageType, &m.Data, &m.SenderID, &m.SenderSignature, &m.CreatedAt, &m.UpdatedAt); err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetLastNSCMetrics gets the N number of latest challenge IDs from the DB +func (s *SQLiteStore) GetLastNSCMetrics() ([]types.NScMetric, error) { + const query = ` +SELECT + count(*) AS count, + challenge_id, + MAX(created_at) AS most_recent +FROM + storage_challenge_metrics +GROUP BY + challenge_id +HAVING + count(*) > 5 +ORDER BY + most_recent DESC +LIMIT 20;` + + rows, err := s.db.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.NScMetric + for rows.Next() { + var m types.NScMetric + err := rows.Scan(&m.Count, &m.ChallengeID, &m.CreatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetLastNHCMetrics gets the N number of latest health-check challenge IDs from the DB +func (s *SQLiteStore) GetLastNHCMetrics() ([]types.NHcMetric, error) { + const query = ` +SELECT + count(*) AS count, + challenge_id, + MAX(created_at) AS most_recent +FROM + healthcheck_challenge_metrics +GROUP BY + challenge_id +HAVING + count(*) > 5 +ORDER BY + most_recent DESC +LIMIT 20;` + + rows, err := s.db.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.NHcMetric + for rows.Next() { + var m types.NHcMetric + err := rows.Scan(&m.Count, &m.ChallengeID, &m.CreatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetSHExecutionMetrics retrieves self-healing execution metrics +func (s *SQLiteStore) 
GetSHExecutionMetrics(ctx context.Context, from time.Time) (metrics.SHExecutionMetrics, error) { + m := metrics.SHExecutionMetrics{} + rows, err := s.GetSelfHealingExecutionMetrics(from) + if err != nil { + return m, err + } + log.WithContext(ctx).WithField("rows", len(rows)).Info("self-healing execution metrics row count") + + challenges := make(map[string]SHChallengeMetric) + for _, row := range rows { + if _, ok := challenges[row.ChallengeID]; !ok { + challenges[row.ChallengeID] = SHChallengeMetric{ + ChallengeID: row.ChallengeID, + } + } + + if row.MessageType == int(types.SelfHealingVerificationMessage) { + messages := types.SelfHealingMessages{} + if err := json.Unmarshal(row.Data, &messages); err != nil { + return m, fmt.Errorf("cannot unmarshal self healing execution message type 3: %w - row ID: %d", err, row.ID) + } + + if len(messages) >= minVerifications { + ch := challenges[row.ChallengeID] + ch.HasMinVerifications = true + challenges[row.ChallengeID] = ch + } + + reconReqVerified := 0 + reconNotReqVerified := 0 + reconReqUnverified := 0 + reconNotReqUnverified := 0 + reconReqHashMismatch := 0 + + for _, message := range messages { + if message.SelfHealingMessageData.Verification.VerifiedTicket.IsReconstructionRequired { + if message.SelfHealingMessageData.Verification.VerifiedTicket.IsReconstructionRequiredByHealer { + if message.SelfHealingMessageData.Verification.VerifiedTicket.IsVerified { + reconReqVerified++ + } else { + reconReqHashMismatch++ + } + } else { + reconNotReqUnverified++ + } + } else { + if message.SelfHealingMessageData.Verification.VerifiedTicket.IsReconstructionRequiredByHealer { + reconReqUnverified++ + } else { + reconNotReqVerified++ + } + } + } + + if reconReqVerified >= minVerifications { + ch := challenges[row.ChallengeID] + ch.IsVerified = true + ch.IsReconstructionRequiredVerified = true + challenges[row.ChallengeID] = ch + } else if reconNotReqVerified >= minVerifications { + ch := challenges[row.ChallengeID] + 
ch.IsVerified = true + ch.IsReconstructionNotRequiredVerified = true + challenges[row.ChallengeID] = ch + } else if reconReqUnverified >= minVerifications { + ch := challenges[row.ChallengeID] + ch.IsUnverified = true + ch.IsReconstructionRequiredNotVerified = true + challenges[row.ChallengeID] = ch + } else if reconNotReqUnverified >= minVerifications { + ch := challenges[row.ChallengeID] + ch.IsUnverified = true + ch.IsReconstructionNotRequiredNotVerified = true + challenges[row.ChallengeID] = ch + } else if reconReqHashMismatch >= minVerifications { + ch := challenges[row.ChallengeID] + ch.IsReconstructionRequiredHashMismatch = true + challenges[row.ChallengeID] = ch + } + + } else if row.MessageType == int(types.SelfHealingResponseMessage) { + messages := types.SelfHealingMessages{} + if err := json.Unmarshal(row.Data, &messages); err != nil { + return m, fmt.Errorf("cannot unmarshal self healing execution message type 3: %w - row ID: %d", err, row.ID) + } + if len(messages) == 0 { + return m, fmt.Errorf("len of selfhealing messages should not be 0 - problem with row ID %d", row.ID) + } + + data := messages[0].SelfHealingMessageData + + ch := challenges[row.ChallengeID] + if data.Response.RespondedTicket.IsReconstructionRequired { + ch.IsAccepted = true + } else { + ch.IsRejected = true + } + challenges[row.ChallengeID] = ch + + } else if row.MessageType == int(types.SelfHealingCompletionMessage) { + ch := challenges[row.ChallengeID] + ch.IsHealed = true + challenges[row.ChallengeID] = ch + } else if row.MessageType == int(types.SelfHealingAcknowledgementMessage) { + ch := challenges[row.ChallengeID] + ch.IsAck = true + challenges[row.ChallengeID] = ch + } + } + + log.WithContext(ctx).WithField("challenges", len(challenges)).Info("self-healing execution metrics challenges count") + + for _, challenge := range challenges { + log.WithContext(ctx).WithField("challenge-id", challenge.ChallengeID).WithField("is-accepted", challenge.IsAccepted). 
+ WithField("is-verified", challenge.IsVerified).WithField("is-healed", challenge.IsHealed). + Info("self-healing challenge metric") + + if challenge.IsAck { + m.TotalChallengesAcknowledged++ + } + + if challenge.IsAccepted { + m.TotalChallengesAccepted++ + } + + if challenge.IsRejected { + m.TotalChallengesRejected++ + } + + if challenge.IsVerified { + m.TotalChallengeEvaluationsVerified++ + } + + if challenge.IsReconstructionRequiredVerified { + m.TotalReconstructionsApproved++ + } + + if challenge.IsReconstructionNotRequiredVerified { + m.TotalReconstructionsNotRquiredApproved++ + } + + if challenge.IsUnverified { + m.TotalChallengeEvaluationsUnverified++ + } + + if challenge.IsReconstructionRequiredNotVerified { + m.TotalReconstructionsNotApproved++ + } + + if challenge.IsReconstructionNotRequiredNotVerified { + m.TotalReconstructionsNotRequiredEvaluationNotApproved++ + } + + if challenge.IsReconstructionRequiredHashMismatch { + m.TotalReconstructionRequiredHashMismatch++ + } + + if challenge.IsHealed { + m.TotalFilesHealed++ + } + } + + return m, nil +} + +// QueryMetrics queries the self-healing metrics +func (s *SQLiteStore) QueryMetrics(ctx context.Context, from time.Time, _ *time.Time) (m metrics.Metrics, err error) { + genMetric, err := s.GetSelfHealingGenerationMetrics(from) + if err != nil { + return metrics.Metrics{}, err + } + + te := metrics.SHTriggerMetrics{} + challengesIssued := 0 + for _, metric := range genMetric { + t := metrics.SHTriggerMetric{} + data := types.SelfHealingMessages{} + if err := json.Unmarshal(metric.Data, &data); err != nil { + return metrics.Metrics{}, fmt.Errorf("cannot unmarshal self healing generation message type 3: %w", err) + } + + if len(data) < 1 { + return metrics.Metrics{}, fmt.Errorf("len of selfhealing messages data JSON should not be 0") + } + + t.TriggerID = metric.TriggerID + t.ListOfNodes = data[0].SelfHealingMessageData.Challenge.NodesOnWatchlist + t.TotalTicketsIdentified = 
len(data[0].SelfHealingMessageData.Challenge.ChallengeTickets) + + for _, ticket := range data[0].SelfHealingMessageData.Challenge.ChallengeTickets { + t.TotalFilesIdentified += len(ticket.MissingKeys) + } + + challengesIssued += t.TotalTicketsIdentified + + te = append(te, t) + } + + em, err := s.GetSHExecutionMetrics(ctx, from) + if err != nil { + return metrics.Metrics{}, fmt.Errorf("cannot get self healing execution metrics: %w", err) + } + + em.TotalChallengesIssued = challengesIssued + em.TotalFileHealingFailed = em.TotalReconstructionsApproved - em.TotalFilesHealed + + m.SHTriggerMetrics = te + + m.SHExecutionMetrics = em + + return m, nil +} + +// GetLastNSHChallenges retrieves the latest 'N' self-healing challenges +func (s *SQLiteStore) GetLastNSHChallenges(ctx context.Context, n int) (types.SelfHealingReports, error) { + challenges := types.SelfHealingReports{} + rows, err := s.GetSelfHealingExecutionMetrics(oneYearAgo) + if err != nil { + return challenges, err + } + log.WithContext(ctx).WithField("rows", len(rows)).Info("self-healing execution metrics row count") + + challengesInserted := 0 + for _, row := range rows { + if _, ok := challenges[row.ChallengeID]; !ok { + if challengesInserted == n { + continue + } + + challenges[row.ChallengeID] = types.SelfHealingReport{} + challengesInserted++ + } + + messages := types.SelfHealingMessages{} + if err := json.Unmarshal(row.Data, &messages); err != nil { + return challenges, fmt.Errorf("cannot unmarshal self healing execution message type 3: %w", err) + } + + msgType := types.SelfHealingMessageType(row.MessageType) + challenges[row.ChallengeID][msgType.String()] = messages + } + + return challenges, nil +} + +// GetSHChallengeReport returns the self-healing report +func (s *SQLiteStore) GetSHChallengeReport(ctx context.Context, challengeID string) (types.SelfHealingReports, error) { + challenges := types.SelfHealingReports{} + rows, err := s.GetSelfHealingExecutionMetrics(oneYearAgo) + if err != nil { + 
return challenges, err + } + log.WithContext(ctx).WithField("rows", len(rows)).Info("self-healing execution metrics row count") + + for _, row := range rows { + if row.ChallengeID == challengeID { + if _, ok := challenges[row.ChallengeID]; !ok { + challenges[row.ChallengeID] = types.SelfHealingReport{} + } + + messages := types.SelfHealingMessages{} + if err := json.Unmarshal(row.Data, &messages); err != nil { + return challenges, fmt.Errorf("cannot unmarshal self healing execution message type 3: %w", err) + } + + msgType := types.SelfHealingMessageType(row.MessageType) + challenges[row.ChallengeID][msgType.String()] = messages + } + } + + return challenges, nil +} + +// QuerySelfHealingChallenges retrieves self-healing audit logs stored in DB for self-healing +func (s *SQLiteStore) QuerySelfHealingChallenges() (challenges []types.SelfHealingChallenge, err error) { + const selectQuery = "SELECT * FROM self_healing_challenges" + rows, err := s.db.Query(selectQuery) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + challenge := types.SelfHealingChallenge{} + err = rows.Scan(&challenge.ID, &challenge.ChallengeID, &challenge.MerkleRoot, &challenge.FileHash, + &challenge.ChallengingNode, &challenge.RespondingNode, &challenge.VerifyingNode, &challenge.ReconstructedFileHash, + &challenge.Status, &challenge.CreatedAt, &challenge.UpdatedAt) + if err != nil { + return nil, err + } + + challenges = append(challenges, challenge) + } + + return challenges, nil +} + +// BatchInsertSelfHealingChallengeEvents inserts self-healing-challenge events in a batch +func (s *SQLiteStore) BatchInsertSelfHealingChallengeEvents(ctx context.Context, eventsBatch []types.SelfHealingChallengeEvent) error { + tx, err := s.db.BeginTx(ctx, nil) + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO self_healing_challenge_events + (trigger_id, ticket_id, challenge_id, data, sender_id, is_processed, created_at, updated_at) + VALUES 
(?, ?, ?, ?, ?, ?, ?, ?) + `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + stmt2, err := tx.Prepare(` + INSERT OR IGNORE INTO self_healing_execution_metrics(id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) + VALUES(NULL,?,?,?,?,?,?,?,?); + `) + if err != nil { + tx.Rollback() + return err + } + defer stmt2.Close() + + for _, event := range eventsBatch { + now := time.Now().UTC() + + _, err = stmt.Exec(event.TriggerID, event.TicketID, event.ChallengeID, event.Data, event.SenderID, false, now, now) + if err != nil { + tx.Rollback() + return err + } + + _, err = stmt2.Exec(event.ExecMetric.TriggerID, event.ExecMetric.ChallengeID, event.ExecMetric.MessageType, event.ExecMetric.Data, event.ExecMetric.SenderID, event.ExecMetric.SenderSignature, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + return tx.Commit() +} + +// GetSelfHealingChallengeEvents retrieves the challenge events from DB +func (s *SQLiteStore) GetSelfHealingChallengeEvents() ([]types.SelfHealingChallengeEvent, error) { + const selectQuery = ` + SELECT trigger_id, ticket_id, challenge_id, data, sender_id, is_processed, created_at, updated_at + FROM self_healing_challenge_events + WHERE is_processed = false + ` + rows, err := s.db.Query(selectQuery) + if err != nil { + return nil, err + } + defer rows.Close() + + var events []types.SelfHealingChallengeEvent + + for rows.Next() { + var event types.SelfHealingChallengeEvent + if err := rows.Scan( + &event.TriggerID, &event.TicketID, &event.ChallengeID, &event.Data, &event.SenderID, &event.IsProcessed, + &event.CreatedAt, &event.UpdatedAt, + ); err != nil { + return nil, err + } + + events = append(events, event) + } + + return events, nil +} + +// UpdateSHChallengeEventProcessed updates the is_processed flag of an event +func (s *SQLiteStore) UpdateSHChallengeEventProcessed(challengeID string, isProcessed bool) error { + const updateQuery = ` + UPDATE 
self_healing_challenge_events + SET is_processed = ? + WHERE challenge_id = ? + ` + _, err := s.db.Exec(updateQuery, isProcessed, challengeID) + return err +} + +// CleanupSelfHealingChallenges cleans up self-healing challenges stored in DB for inspection +func (s *SQLiteStore) CleanupSelfHealingChallenges() (err error) { + const delQuery = "DELETE FROM self_healing_challenges" + _, err = s.db.Exec(delQuery) + return err +} diff --git a/pkg/storage/queries/sqlite.go b/pkg/storage/queries/sqlite.go new file mode 100644 index 00000000..c1d4cb02 --- /dev/null +++ b/pkg/storage/queries/sqlite.go @@ -0,0 +1,413 @@ +package queries + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/LumeraProtocol/supernode/pkg/configurer" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/jmoiron/sqlx" + _ "github.com/mattn/go-sqlite3" //go-sqlite3 +) + +var ( + DefaulthPath = configurer.DefaultPath() +) + +const minVerifications = 3 +const createTaskHistory string = ` + CREATE TABLE IF NOT EXISTS task_history ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + time DATETIME NOT NULL, + task_id TEXT NOT NULL, + status TEXT NOT NULL + );` + +const alterTaskHistory string = `ALTER TABLE task_history ADD COLUMN details TEXT;` + +const createStorageChallengeMessages string = ` + CREATE TABLE IF NOT EXISTS storage_challenge_messages ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + sender_signature BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createBroadcastChallengeMessages string = ` + CREATE TABLE IF NOT EXISTS broadcast_challenge_messages ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + challenger TEXT NOT NULL, + recipient TEXT NOT NULL, + observers TEXT NOT NULL, + data BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const 
createStorageChallengeMessagesUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS storage_challenge_messages_unique ON storage_challenge_messages(challenge_id, message_type, sender_id); +` + +const createSelfHealingChallenges string = ` + CREATE TABLE IF NOT EXISTS self_healing_challenges ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + merkleroot TEXT NOT NULL, + file_hash TEXT NOT NULL, + challenging_node TEXT NOT NULL, + responding_node TEXT NOT NULL, + verifying_node TEXT, + reconstructed_file_hash BLOB, + status TEXT NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL + );` + +const createPingHistory string = ` + CREATE TABLE IF NOT EXISTS ping_history ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + supernode_id TEXT UNIQUE NOT NULL, + ip_address TEXT UNIQUE NOT NULL, + total_pings INTEGER NOT NULL, + total_successful_pings INTEGER NOT NULL, + avg_ping_response_time FLOAT NOT NULL, + is_online BOOLEAN NOT NULL, + is_on_watchlist BOOLEAN NOT NULL, + is_adjusted BOOLEAN NOT NULL, + cumulative_response_time FLOAT NOT NULL, + last_seen DATETIME NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL + );` + +const createPingHistoryUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS ping_history_unique ON ping_history(supernode_id, ip_address); +` + +const createSelfHealingGenerationMetrics string = ` + CREATE TABLE IF NOT EXISTS self_healing_generation_metrics ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + trigger_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + sender_signature BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createSelfHealingGenerationMetricsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS self_healing_generation_metrics_unique ON self_healing_generation_metrics(trigger_id); +` + +const createSelfHealingExecutionMetrics string = ` + CREATE TABLE IF 
NOT EXISTS self_healing_execution_metrics ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + trigger_id TEXT NOT NULL, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + sender_signature BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createSelfHealingChallengeTickets string = ` + CREATE TABLE IF NOT EXISTS self_healing_challenge_events ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + trigger_id TEXT NOT NULL, + ticket_id TEXT NOT NULL, + challenge_id TEXT NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + is_processed BOOLEAN NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +); +` + +const createSelfHealingChallengeTicketsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS self_healing_challenge_events_unique ON self_healing_challenge_events(trigger_id, ticket_id, challenge_id); +` + +const createSelfHealingExecutionMetricsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS self_healing_execution_metrics_unique ON self_healing_execution_metrics(trigger_id, challenge_id, message_type); +` + +const alterTablePingHistory = `ALTER TABLE ping_history +ADD COLUMN metrics_last_broadcast_at DATETIME NULL;` + +const alterTablePingHistoryGenerationMetrics = `ALTER TABLE ping_history +ADD COLUMN generation_metrics_last_broadcast_at DATETIME NULL;` + +const alterTablePingHistoryExecutionMetrics = `ALTER TABLE ping_history +ADD COLUMN execution_metrics_last_broadcast_at DATETIME NULL;` + +const createStorageChallengeMetrics string = ` + CREATE TABLE IF NOT EXISTS storage_challenge_metrics ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createStorageChallengeMetricsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS 
storage_challenge_metrics_unique ON storage_challenge_metrics(challenge_id, message_type, sender_id); +` + +const createHealthCheckChallengeMessages string = ` + CREATE TABLE IF NOT EXISTS healthcheck_challenge_messages ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + sender_signature BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createBroadcastHealthCheckChallengeMessages string = ` + CREATE TABLE IF NOT EXISTS broadcast_healthcheck_challenge_messages ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + challenger TEXT NOT NULL, + recipient TEXT NOT NULL, + observers TEXT NOT NULL, + data BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createHealthCheckChallengeMetrics string = ` + CREATE TABLE IF NOT EXISTS healthcheck_challenge_metrics ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +); +` +const createHealthCheckChallengeMetricsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS healthcheck_challenge_metrics_unique ON healthcheck_challenge_metrics(challenge_id, message_type, sender_id); +` +const alterTablePingHistoryHealthCheckColumn = `ALTER TABLE ping_history +ADD COLUMN health_check_metrics_last_broadcast_at DATETIME NULL;` + +const createPingHistoryWithoutUniqueIPAddress string = ` +BEGIN TRANSACTION; + +CREATE TABLE IF NOT EXISTS new_ping_history ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + supernode_id TEXT UNIQUE NOT NULL, + ip_address TEXT NOT NULL, -- Removed UNIQUE constraint here + total_pings INTEGER NOT NULL, + total_successful_pings INTEGER NOT NULL, + avg_ping_response_time FLOAT NOT NULL, + is_online BOOLEAN NOT NULL, + 
is_on_watchlist BOOLEAN NOT NULL, + is_adjusted BOOLEAN NOT NULL, + cumulative_response_time FLOAT NOT NULL, + last_seen DATETIME NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL, + metrics_last_broadcast_at DATETIME, -- Assuming these columns already exist in the old table + generation_metrics_last_broadcast_at DATETIME, + execution_metrics_last_broadcast_at DATETIME, + health_check_metrics_last_broadcast_at DATETIME +); + +-- Step 2: Copy data including all columns from the old table +INSERT INTO new_ping_history ( + id, + supernode_id, + ip_address, + total_pings, + total_successful_pings, + avg_ping_response_time, + is_online, + is_on_watchlist, + is_adjusted, + cumulative_response_time, + last_seen, + created_at, + updated_at, + metrics_last_broadcast_at, + generation_metrics_last_broadcast_at, + execution_metrics_last_broadcast_at, + health_check_metrics_last_broadcast_at +) +SELECT + id, + supernode_id, + ip_address, + total_pings, + total_successful_pings, + avg_ping_response_time, + is_online, + is_on_watchlist, + is_adjusted, + cumulative_response_time, + last_seen, + created_at, + updated_at, + metrics_last_broadcast_at, + generation_metrics_last_broadcast_at, + execution_metrics_last_broadcast_at, + health_check_metrics_last_broadcast_at +FROM ping_history; + +-- Step 3: Drop the original table +DROP TABLE ping_history; + +-- Step 4: Rename the new table to the original table's name +ALTER TABLE new_ping_history RENAME TO ping_history; + +COMMIT; +` + +const ( + historyDBName = "history.db" + emptyString = "" +) + +// SQLiteStore handles sqlite ops +type SQLiteStore struct { + db *sqlx.DB +} + +// CloseHistoryDB closes history database +func (s *SQLiteStore) CloseHistoryDB(ctx context.Context) { + if err := s.db.Close(); err != nil { + log.WithContext(ctx).WithError(err).Error("error closing history db") + } +} + +// OpenHistoryDB opens history DB +func OpenHistoryDB() (LocalStoreInterface, error) { + dbFile := 
filepath.Join(DefaulthPath, historyDBName) + db, err := sqlx.Connect("sqlite3", dbFile) + if err != nil { + return nil, fmt.Errorf("cannot open sqlite database: %w", err) + } + + if _, err := db.Exec(createTaskHistory); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createStorageChallengeMessages); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createStorageChallengeMessagesUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot execute migration: %w", err) + } + + if _, err := db.Exec(createBroadcastChallengeMessages); err != nil { + return nil, fmt.Errorf("cannot execute migration: %w", err) + } + + if _, err := db.Exec(createSelfHealingChallenges); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createPingHistory); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createPingHistoryUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingGenerationMetrics); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingGenerationMetricsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingExecutionMetrics); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingExecutionMetricsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingChallengeTickets); err != nil { + return nil, fmt.Errorf("cannot create createSelfHealingChallengeTickets: %w", err) + } + + if _, err := db.Exec(createSelfHealingChallengeTicketsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create 
createSelfHealingChallengeTicketsUniqueIndex: %w", err) + } + + if _, err := db.Exec(createStorageChallengeMetrics); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createStorageChallengeMetricsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createHealthCheckChallengeMessages); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createHealthCheckChallengeMetrics); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createHealthCheckChallengeMetricsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createBroadcastHealthCheckChallengeMessages); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + _, _ = db.Exec(alterTaskHistory) + + _, _ = db.Exec(alterTablePingHistory) + + _, _ = db.Exec(alterTablePingHistoryGenerationMetrics) + + _, _ = db.Exec(alterTablePingHistoryExecutionMetrics) + + _, _ = db.Exec(alterTablePingHistoryHealthCheckColumn) + + _, err = db.Exec(createPingHistoryWithoutUniqueIPAddress) + if err != nil { + log.WithError(err).Error("error executing ping-history w/o unique ip-address constraint migration") + } + + pragmas := []string{ + "PRAGMA synchronous=NORMAL;", + "PRAGMA cache_size=-262144;", + "PRAGMA busy_timeout=120000;", + "PRAGMA journal_mode=WAL;", + } + + for _, pragma := range pragmas { + if _, err := db.Exec(pragma); err != nil { + return nil, fmt.Errorf("cannot set sqlite database parameter: %w", err) + } + } + + return &SQLiteStore{ + db: db, + }, nil +} diff --git a/pkg/storage/queries/storage_challenge.go b/pkg/storage/queries/storage_challenge.go new file mode 100644 index 00000000..204fd0cc --- /dev/null +++ b/pkg/storage/queries/storage_challenge.go @@ -0,0 +1,493 @@ +package queries + +import ( + "context" + "fmt" + 
"time" + + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/types" + "github.com/LumeraProtocol/supernode/pkg/utils/metrics" + + json "github.com/json-iterator/go" +) + +const batchSizeForChallengeIDsRetrieval = 500 + +type StorageChallengeQueries interface { + InsertStorageChallengeMessage(challenge types.StorageChallengeLogMessage) error + InsertBroadcastMessage(challenge types.BroadcastLogMessage) error + QueryStorageChallengeMessage(challengeID string, messageType int) (challenge types.StorageChallengeLogMessage, err error) + CleanupStorageChallenges() (err error) + GetStorageChallengeMetricsByChallengeID(challengeID string) ([]types.StorageChallengeLogMessage, error) + GetMetricsByChallengeIDAndMessageType(challengeID string, messageType types.MessageType) ([]types.StorageChallengeLogMessage, error) + + BatchInsertSCMetrics(metrics []types.StorageChallengeLogMessage) error + StorageChallengeMetrics(timestamp time.Time) ([]types.StorageChallengeLogMessage, error) + InsertStorageChallengeMetric(metric types.StorageChallengeMetric) error + GetSCSummaryStats(from time.Time) (scMetrics metrics.SCMetrics, err error) + GetTotalSCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.SCMetrics, error) + GetChallengerEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) + GetObserversEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) + GetMetricsDataByStorageChallengeID(ctx context.Context, challengeID string) ([]types.Message, error) + GetLastNSCMetrics() ([]types.NScMetric, error) + GetDistinctChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) + GetDistinctChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) + BatchInsertScoreAggregationChallenges(challengeIDs []string, isAggregated bool) error +} + +// InsertStorageChallengeMessage inserts failed storage challenge to db +func (s *SQLiteStore) InsertStorageChallengeMessage(challenge 
types.StorageChallengeLogMessage) error { + now := time.Now().UTC() + const insertQuery = "INSERT INTO storage_challenge_messages(id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING;" + _, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.MessageType, challenge.Data, challenge.Sender, challenge.SenderSignature, now, now) + if err != nil { + return err + } + + return nil +} + +func (s *SQLiteStore) InsertStorageChallengeMetric(m types.StorageChallengeMetric) error { + now := time.Now().UTC() + + const metricsQuery = "INSERT INTO storage_challenge_metrics(id, challenge_id, message_type, data, sender_id, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?) ON CONFLICT DO NOTHING;" + _, err := s.db.Exec(metricsQuery, m.ChallengeID, m.MessageType, m.Data, m.SenderID, now, now) + if err != nil { + return err + } + + return nil +} + +func (s *SQLiteStore) BatchInsertSCMetrics(metrics []types.StorageChallengeLogMessage) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO storage_challenge_metrics + (id, challenge_id, message_type, data, sender_id, created_at, updated_at) + VALUES (NULL,?,?,?,?,?,?) 
+ `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + for _, metric := range metrics { + now := time.Now().UTC() + + _, err = stmt.Exec(metric.ChallengeID, metric.MessageType, metric.Data, metric.Sender, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + // Commit the transaction + return tx.Commit() +} + +func (s *SQLiteStore) GetMetricsDataByStorageChallengeID(ctx context.Context, challengeID string) (storageChallengeMessages []types.Message, err error) { + scMetrics, err := s.GetStorageChallengeMetricsByChallengeID(challengeID) + if err != nil { + return storageChallengeMessages, err + } + log.WithContext(ctx).WithField("rows", len(scMetrics)).Info("storage-challenge metrics row count") + + for _, scMetric := range scMetrics { + msg := types.MessageData{} + if err := json.Unmarshal(scMetric.Data, &msg); err != nil { + return storageChallengeMessages, fmt.Errorf("cannot unmarshal storage challenge data: %w", err) + } + + storageChallengeMessages = append(storageChallengeMessages, types.Message{ + ChallengeID: scMetric.ChallengeID, + MessageType: types.MessageType(scMetric.MessageType), + Sender: scMetric.Sender, + SenderSignature: scMetric.SenderSignature, + Data: msg, + }) + } + + return storageChallengeMessages, nil +} + +func (s *SQLiteStore) GetTotalSCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.SCMetrics, error) { + metrics := metrics.SCMetrics{} + + // Query for total number of challenges + totalChallengeQuery := "SELECT COUNT(DISTINCT challenge_id) FROM storage_challenge_metrics WHERE message_type = 1 AND created_at > ?" + err := s.db.QueryRow(totalChallengeQuery, from).Scan(&metrics.TotalChallenges) + if err != nil { + return metrics, err + } + + // Query for total challenges responded + totalChallengesProcessedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM storage_challenge_metrics WHERE message_type = 2 AND created_at > ?" 
+ err = s.db.QueryRow(totalChallengesProcessedQuery, from).Scan(&metrics.TotalChallengesProcessed) + if err != nil { + return metrics, err + } + + totalChallengesEvaluatedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM storage_challenge_metrics WHERE message_type = 3 AND created_at > ?" + err = s.db.QueryRow(totalChallengesEvaluatedQuery, from).Scan(&metrics.TotalChallengesEvaluatedByChallenger) + if err != nil { + return metrics, err + } + + return metrics, nil +} + +func (s *SQLiteStore) GetChallengerEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) { + var messages []types.StorageChallengeLogMessage + + query := "SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at FROM storage_challenge_metrics WHERE message_type = 3 and created_at > ?" + rows, err := s.db.Query(query, from) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var msg types.StorageChallengeLogMessage + err := rows.Scan(&msg.ID, &msg.ChallengeID, &msg.MessageType, &msg.Data, &msg.Sender, &msg.CreatedAt, &msg.UpdatedAt) + if err != nil { + return nil, err + } + messages = append(messages, msg) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return messages, nil +} + +func (s *SQLiteStore) GetObserversEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) { + var messages []types.StorageChallengeLogMessage + + query := "SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at FROM storage_challenge_metrics WHERE message_type = 4 and created_at > ?" 
+ rows, err := s.db.Query(query, from) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var msg types.StorageChallengeLogMessage + err := rows.Scan(&msg.ID, &msg.ChallengeID, &msg.MessageType, &msg.Data, &msg.Sender, &msg.CreatedAt, &msg.UpdatedAt) + if err != nil { + return nil, err + } + messages = append(messages, msg) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return messages, nil +} + +func (s *SQLiteStore) GetSCSummaryStats(from time.Time) (scMetrics metrics.SCMetrics, err error) { + scStats := metrics.SCMetrics{} + scMetrics, err = s.GetTotalSCGeneratedAndProcessedAndEvaluated(from) + if err != nil { + return scMetrics, err + } + scStats.TotalChallenges = scMetrics.TotalChallenges + scStats.TotalChallengesProcessed = scMetrics.TotalChallengesProcessed + scStats.TotalChallengesEvaluatedByChallenger = scMetrics.TotalChallengesEvaluatedByChallenger + + observersEvaluations, err := s.GetObserversEvaluations(from) + if err != nil { + return scMetrics, err + } + log.WithField("observer_evaluations", len(observersEvaluations)).Info("observer evaluations retrieved") + + observerEvaluationMetrics := processObserverEvaluations(observersEvaluations) + log.WithField("observer_evaluation_metrics", len(observerEvaluationMetrics)).Info("observer evaluation metrics retrieved") + + for _, obMetrics := range observerEvaluationMetrics { + if obMetrics.ChallengesVerified > 2 { + scMetrics.TotalChallengesVerified++ + } else { + if obMetrics.FailedByInvalidTimestamps > 0 { + scMetrics.SlowResponsesObservedByObservers++ + } + if obMetrics.FailedByInvalidSignatures > 0 { + scMetrics.InvalidSignaturesObservedByObservers++ + } + if obMetrics.FailedByInvalidEvaluation > 0 { + scMetrics.InvalidEvaluationObservedByObservers++ + } + } + } + + return scMetrics, nil +} + +// InsertBroadcastMessage inserts broadcast storage challenge msg to db +func (s *SQLiteStore) InsertBroadcastMessage(challenge types.BroadcastLogMessage) error 
{ + now := time.Now().UTC() + const insertQuery = "INSERT INTO broadcast_challenge_messages(id, challenge_id, data, challenger, recipient, observers, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?);" + _, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.Data, challenge.Challenger, challenge.Recipient, challenge.Observers, now, now) + if err != nil { + return err + } + + return nil +} + +// StorageChallengeMetrics retrieves all the metrics needs to be broadcast +func (s *SQLiteStore) StorageChallengeMetrics(timestamp time.Time) ([]types.StorageChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM storage_challenge_metrics + WHERE created_at > ? + ` + + rows, err := s.db.Query(query, timestamp) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.StorageChallengeLogMessage + for rows.Next() { + var m types.StorageChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// QueryStorageChallengeMessage retrieves storage challenge message against challengeID and messageType +func (s *SQLiteStore) QueryStorageChallengeMessage(challengeID string, messageType int) (challengeMessage types.StorageChallengeLogMessage, err error) { + const selectQuery = "SELECT * FROM storage_challenge_messages WHERE challenge_id=? AND message_type=?" 
+ err = s.db.QueryRow(selectQuery, challengeID, messageType).Scan( + &challengeMessage.ID, &challengeMessage.ChallengeID, &challengeMessage.MessageType, &challengeMessage.Data, + &challengeMessage.Sender, &challengeMessage.SenderSignature, &challengeMessage.CreatedAt, &challengeMessage.UpdatedAt) + + if err != nil { + return challengeMessage, err + } + + return challengeMessage, nil +} + +// CleanupStorageChallenges cleans up challenges stored in DB for self-healing +func (s *SQLiteStore) CleanupStorageChallenges() (err error) { + const delQuery = "DELETE FROM storage_challenge_messages" + _, err = s.db.Exec(delQuery) + return err +} + +// GetStorageChallengeMetricsByChallengeID retrieves all the metrics +func (s *SQLiteStore) GetStorageChallengeMetricsByChallengeID(challengeID string) ([]types.StorageChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM storage_challenge_metrics + WHERE challenge_id = ?;` + + rows, err := s.db.Query(query, challengeID) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.StorageChallengeLogMessage + for rows.Next() { + var m types.StorageChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetMetricsByChallengeIDAndMessageType retrieves all the metrics by challengeID and messageType +func (s *SQLiteStore) GetMetricsByChallengeIDAndMessageType(challengeID string, messageType types.MessageType) ([]types.StorageChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM storage_challenge_metrics + WHERE challenge_id = ? 
+ AND message_type = ?;` + + rows, err := s.db.Query(query, challengeID, int(messageType)) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.StorageChallengeLogMessage + for rows.Next() { + var m types.StorageChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +func processObserverEvaluations(observersEvaluations []types.StorageChallengeLogMessage) map[string]ObserverEvaluationMetrics { + evaluationMap := make(map[string]ObserverEvaluationMetrics) + + for _, observerEvaluation := range observersEvaluations { + var oe types.MessageData + if err := json.Unmarshal(observerEvaluation.Data, &oe); err != nil { + continue + } + + oem, exists := evaluationMap[observerEvaluation.ChallengeID] + if !exists { + oem = ObserverEvaluationMetrics{} // Initialize if not exists + } + + if isObserverEvaluationVerified(oe.ObserverEvaluation) { + oem.ChallengesVerified++ + } else { + if !oe.ObserverEvaluation.IsChallengeTimestampOK || + !oe.ObserverEvaluation.IsProcessTimestampOK || + !oe.ObserverEvaluation.IsEvaluationTimestampOK { + oem.FailedByInvalidTimestamps++ + } + + if !oe.ObserverEvaluation.IsChallengerSignatureOK || + !oe.ObserverEvaluation.IsRecipientSignatureOK { + oem.FailedByInvalidSignatures++ + } + + if !oe.ObserverEvaluation.IsEvaluationResultOK { + oem.FailedByInvalidEvaluation++ + } + } + + evaluationMap[observerEvaluation.ChallengeID] = oem + } + + return evaluationMap +} + +func isObserverEvaluationVerified(observerEvaluation types.ObserverEvaluationData) bool { + if !observerEvaluation.IsEvaluationResultOK { + return false + } + + if !observerEvaluation.IsChallengerSignatureOK { + return false + } + + if !observerEvaluation.IsRecipientSignatureOK { + return false + } + + if !observerEvaluation.IsChallengeTimestampOK { + return false + } + + if 
!observerEvaluation.IsProcessTimestampOK { + return false + } + + if !observerEvaluation.IsEvaluationTimestampOK { + return false + } + + return true +} + +// GetDistinctChallengeIDsCountForScoreAggregation gets the count of distinct challenge ids for score aggregation +func (s *SQLiteStore) GetDistinctChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) { + query := ` + SELECT COUNT(DISTINCT challenge_id) + FROM storage_challenge_metrics + WHERE message_type = 4 AND created_at >= ? AND created_at < ? + ` + + var challengeIDsCount int + err := s.db.QueryRow(query, after, before).Scan(&challengeIDsCount) + if err != nil { + return 0, err + } + + return challengeIDsCount, nil +} + +// GetDistinctChallengeIDs retrieves the distinct challenge ids for score aggregation +func (s *SQLiteStore) GetDistinctChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) { + offset := batchNumber * batchSizeForChallengeIDsRetrieval + + query := ` + SELECT DISTINCT challenge_id + FROM storage_challenge_metrics + WHERE message_type = 4 AND created_at >= ? AND created_at < ? + LIMIT ? OFFSET ? 
+ ` + + rows, err := s.db.Query(query, after, before, batchSizeForChallengeIDsRetrieval, offset) + if err != nil { + return nil, err + } + defer rows.Close() + + var challengeIDs []string + for rows.Next() { + var challengeID string + if err := rows.Scan(&challengeID); err != nil { + return nil, err + } + challengeIDs = append(challengeIDs, challengeID) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return challengeIDs, nil +} + +// BatchInsertScoreAggregationChallenges inserts the batch of challenge ids for score aggregation +func (s *SQLiteStore) BatchInsertScoreAggregationChallenges(challengeIDs []string, isAggregated bool) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO sc_score_aggregation_queue + (challenge_id, is_aggregated, created_at, updated_at) + VALUES (?,?,?,?) + `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + for _, id := range challengeIDs { + now := time.Now().UTC() + + _, err = stmt.Exec(id, isAggregated, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + // Commit the transaction + return tx.Commit() +} diff --git a/pkg/storage/queries/task_history.go b/pkg/storage/queries/task_history.go new file mode 100644 index 00000000..28a8572c --- /dev/null +++ b/pkg/storage/queries/task_history.go @@ -0,0 +1,70 @@ +package queries + +import ( + "fmt" + + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/types" + + json "github.com/json-iterator/go" +) + +type TaskHistoryQueries interface { + InsertTaskHistory(history types.TaskHistory) (int, error) + QueryTaskHistory(taskID string) (history []types.TaskHistory, err error) +} + +// InsertTaskHistory inserts task history +func (s *SQLiteStore) InsertTaskHistory(history types.TaskHistory) (hID int, err error) { + var stringifyDetails string + if history.Details != nil { + stringifyDetails = history.Details.Stringify() + } + + 
const insertQuery = "INSERT INTO task_history(id, time, task_id, status, details) VALUES(NULL,?,?,?,?);" + res, err := s.db.Exec(insertQuery, history.CreatedAt, history.TaskID, history.Status, stringifyDetails) + + if err != nil { + return 0, err + } + + var id int64 + if id, err = res.LastInsertId(); err != nil { + return 0, err + } + + return int(id), nil +} + +// QueryTaskHistory gets task history by taskID +func (s *SQLiteStore) QueryTaskHistory(taskID string) (history []types.TaskHistory, err error) { + const selectQuery = "SELECT * FROM task_history WHERE task_id = ? LIMIT 100" + rows, err := s.db.Query(selectQuery, taskID) + if err != nil { + return nil, err + } + defer rows.Close() + + var data []types.TaskHistory + for rows.Next() { + i := types.TaskHistory{} + var details string + err = rows.Scan(&i.ID, &i.CreatedAt, &i.TaskID, &i.Status, &details) + if err != nil { + return nil, err + } + + if details != emptyString { + err = json.Unmarshal([]byte(details), &i.Details) + if err != nil { + log.Info(details) + log.WithError(err).Error(fmt.Sprintf("cannot unmarshal task history details: %s", details)) + i.Details = nil + } + } + + data = append(data, i) + } + + return data, nil +} diff --git a/pkg/storage/rqstore/store.go b/pkg/storage/rqstore/store.go index f6fe52f2..bc62a40a 100644 --- a/pkg/storage/rqstore/store.go +++ b/pkg/storage/rqstore/store.go @@ -41,7 +41,7 @@ type SymbolDir struct { func NewSQLiteRQStore(file string) (*SQLiteRQStore, error) { db, err := sqlx.Connect("sqlite3", file) if err != nil { - return nil, fmt.Errorf("cannot open rq-service database: %w", err) + return nil, fmt.Errorf("cannot open rq-services database: %w", err) } // Create the rq_symbols_dir table if it doesn't exist diff --git a/pkg/testutil/lumera.go b/pkg/testutil/lumera.go index cdffd6da..a50166eb 100644 --- a/pkg/testutil/lumera.go +++ b/pkg/testutil/lumera.go @@ -114,6 +114,10 @@ func (m *MockSupernodeModule) GetSuperNode(ctx context.Context, address string) return 
&supernodeTypes.QueryGetSuperNodeResponse{}, nil } +func (m *MockSupernodeModule) GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*supernodeTypes.SuperNode, error) { + return &supernodeTypes.SuperNode{}, nil +} + // MockTxModule implements the tx.Module interface for testing type MockTxModule struct{} @@ -132,6 +136,16 @@ func (m *MockTxModule) GetTx(ctx context.Context, hash string) (*sdktx.GetTxResp // MockNodeModule implements the node.Module interface for testing type MockNodeModule struct{} +// Sign implements node.Module. +func (m *MockNodeModule) Sign(snAccAddress string, data []byte) (signature []byte, err error) { + panic("unimplemented") +} + +// Verify implements node.Module. +func (m *MockNodeModule) Verify(accAddress string, data []byte, signature []byte) (err error) { + panic("unimplemented") +} + func (m *MockNodeModule) GetLatestBlock(ctx context.Context) (*cmtservice.GetLatestBlockResponse, error) { return &cmtservice.GetLatestBlockResponse{ SdkBlock: &cmtservice.Block{ diff --git a/pkg/types/healthcheck_challenge.go b/pkg/types/healthcheck_challenge.go new file mode 100644 index 00000000..a171138b --- /dev/null +++ b/pkg/types/healthcheck_challenge.go @@ -0,0 +1,164 @@ +package types + +import ( + "encoding/json" + "time" + + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +// HealthCheckMessageType represents the type of message sent in the health-check process +type HealthCheckMessageType int + +const ( + // HealthCheckChallengeMessageType represents the challenge message + HealthCheckChallengeMessageType HealthCheckMessageType = iota + 1 + // HealthCheckResponseMessageType represents the response message + HealthCheckResponseMessageType + // HealthCheckEvaluationMessageType represents the evaluation message + HealthCheckEvaluationMessageType + // HealthCheckAffirmationMessageType represents the affirmation message + HealthCheckAffirmationMessageType + // HealthCheckBroadcastMessageType represents the broadcast message 
+ HealthCheckBroadcastMessageType +) + +// String returns the message string +func (hcm HealthCheckMessageType) String() string { + switch hcm { + case HealthCheckChallengeMessageType: + return "challenge" + case HealthCheckResponseMessageType: + return "response" + case HealthCheckEvaluationMessageType: + return "evaluation" + case HealthCheckAffirmationMessageType: + return "affirmation" + default: + return "unknown" + } +} + +// BroadcastHealthCheckMessage represents the healthcheck challenge message that needs to be broadcast after evaluation +type BroadcastHealthCheckMessage struct { + ChallengeID string + Challenger map[string][]byte + Recipient map[string][]byte + Observers map[string][]byte +} + +// BroadcastHealthCheckLogMessage represents the broadcast message log to be stored in the DB +type BroadcastHealthCheckLogMessage struct { + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` +} + +// HealthCheckChallengeData represents the data of challenge +type HealthCheckChallengeData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` +} + +// HealthCheckResponseData represents the data of response +type HealthCheckResponseData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` +} + +// HealthCheckEvaluationData represents the data of evaluation +type HealthCheckEvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + IsVerified bool `json:"is_verified"` +} + +// HealthCheckObserverEvaluationData represents the data of Observer's evaluation +type HealthCheckObserverEvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + IsChallengeTimestampOK bool `json:"is_challenge_timestamp_ok"` + 
IsProcessTimestampOK bool `json:"is_process_timestamp_ok"` + IsEvaluationTimestampOK bool `json:"is_evaluation_timestamp_ok"` + IsRecipientSignatureOK bool `json:"is_recipient_signature_ok"` + IsChallengerSignatureOK bool `json:"is_challenger_signature_ok"` + IsEvaluationResultOK bool `json:"is_evaluation_result_ok"` + Timestamp time.Time `json:"timestamp"` +} + +// HealthCheckMessageData represents the health check challenge message data +type HealthCheckMessageData struct { + ChallengerID string `json:"challenger_id"` + Challenge HealthCheckChallengeData `json:"challenge"` + Observers []string `json:"observers"` + RecipientID string `json:"recipient_id"` + Response HealthCheckResponseData `json:"response"` + ChallengerEvaluation HealthCheckEvaluationData `json:"challenger_evaluation"` + ObserverEvaluation HealthCheckObserverEvaluationData `json:"observer_evaluation"` +} + +// HealthCheckMessage represents the healthcheck challenge message +type HealthCheckMessage struct { + MessageType HealthCheckMessageType `json:"message_type"` + ChallengeID string `json:"challenge_id"` + Data HealthCheckMessageData `json:"data"` + Sender string `json:"sender"` + SenderSignature []byte `json:"sender_signature"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// HealthCheckChallengeMetric represents the metric log to be stored in the DB +type HealthCheckChallengeMetric struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` +} + +// HealthCheckChallengeLogMessage represents the message log to be stored in the DB +type HealthCheckChallengeLogMessage struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + Sender string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time 
`db:"updated_at"` +} + +// BroadcastHealthCheckMessageMetrics is the struct for broadcast message metrics +type BroadcastHealthCheckMessageMetrics struct { + ID int `db:"id"` + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// ProcessBroadcastHealthCheckChallengeMetricsRequest represents the request for broadcasting metrics +type ProcessBroadcastHealthCheckChallengeMetricsRequest struct { + Data []byte `json:"data"` + SenderID string `json:"sender_id"` +} + +// HealthCheckChallengeMessages represents an array of health-check message +type HealthCheckChallengeMessages []HealthCheckMessage + +// Hash returns the hash of the health-check-challenge challenge log data +func (mdl HealthCheckChallengeMessages) Hash() string { + data, _ := json.Marshal(mdl) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} diff --git a/pkg/types/self_healing.go b/pkg/types/self_healing.go new file mode 100644 index 00000000..0ba13081 --- /dev/null +++ b/pkg/types/self_healing.go @@ -0,0 +1,252 @@ +package types + +import ( + "database/sql" + "encoding/json" + "time" + + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +// SelfHealingMessageType represents the type of message sent in the self-healing process +type SelfHealingMessageType int + +const ( + // SelfHealingChallengeMessage represents the challenge message + SelfHealingChallengeMessage SelfHealingMessageType = iota + 1 + // SelfHealingResponseMessage represents the response message + SelfHealingResponseMessage + // SelfHealingVerificationMessage represents the verification message + SelfHealingVerificationMessage + // SelfHealingCompletionMessage represents the challenge message processed successfully + SelfHealingCompletionMessage + // SelfHealingAcknowledgementMessage represents the acknowledgement 
message + SelfHealingAcknowledgementMessage +) + +func (s SelfHealingMessageType) String() string { + messages := [...]string{"", "challenge", "response", "verification", "completion", "acknowledgement"} + if s < 1 || int(s) >= len(messages) { + return "unknown" + } + + return messages[s] +} + +// TicketType represents the type of ticket; nft, cascade, sense +type TicketType int + +const ( + // TicketTypeCascade represents the cascade ticket type + TicketTypeCascade TicketType = iota + 1 + // TicketTypeSense represents the sense ticket type + TicketTypeSense + // TicketTypeNFT represents the NFT ticket type + TicketTypeNFT +) + +func (t TicketType) String() string { + tickets := [...]string{"", "cascade", "sense", "nft"} + if t < 1 || int(t) >= len(tickets) { + return "unknown" + } + + return tickets[t] +} + +// PingInfo represents the structure of data to be inserted into the ping_history table +type PingInfo struct { + ID int `db:"id"` + SupernodeID string `db:"supernode_id"` + IPAddress string `db:"ip_address"` + TotalPings int `db:"total_pings"` + TotalSuccessfulPings int `db:"total_successful_pings"` + AvgPingResponseTime float64 `db:"avg_ping_response_time"` + IsOnline bool `db:"is_online"` + IsOnWatchlist bool `db:"is_on_watchlist"` + IsAdjusted bool `db:"is_adjusted"` + CumulativeResponseTime float64 `db:"cumulative_response_time"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` + LastSeen sql.NullTime `db:"last_seen"` + MetricsLastBroadcastAt sql.NullTime `db:"metrics_last_broadcast_at"` + HealthCheckMetricsLastBroadcastAt sql.NullTime `db:"health_check_metrics_last_broadcast_at"` + GenerationMetricsLastBroadcastAt sql.NullTime `db:"generation_metrics_last_broadcast_at"` + ExecutionMetricsLastBroadcastAt sql.NullTime `db:"execution_metrics_last_broadcast_at"` + SCScoreLastAggregatedAt sql.NullTime `db:"sc_score_last_aggregated_at"` + LastResponseTime float64 `db:"-"` +} + +// PingInfos represents array of ping info +type 
PingInfos []PingInfo + +// SelfHealingReports represents the self-healing metrics for each challenge +type SelfHealingReports map[string]SelfHealingReport + +// SelfHealingReport represents the self-healing challenges +type SelfHealingReport map[string]SelfHealingMessages + +// SelfHealingMessages represents the self-healing metrics for each challenge = message_type = 3 +type SelfHealingMessages []SelfHealingMessage + +// SelfHealingMessage represents the self-healing message +type SelfHealingMessage struct { + TriggerID string `json:"trigger_id"` + MessageType SelfHealingMessageType `json:"message_type"` + SelfHealingMessageData SelfHealingMessageData `json:"data"` + SenderID string `json:"sender_id"` + SenderSignature []byte `json:"sender_signature"` +} + +// SelfHealingMessageData represents the self-healing message data == message_type = 2 +type SelfHealingMessageData struct { + ChallengerID string `json:"challenger_id"` + RecipientID string `json:"recipient_id"` + Challenge SelfHealingChallengeData `json:"challenge"` + Response SelfHealingResponseData `json:"response"` + Verification SelfHealingVerificationData `json:"verification"` +} + +// SelfHealingChallengeData represents the challenge data for self-healing sent by the challenger +type SelfHealingChallengeData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + ChallengeTickets []ChallengeTicket `json:"challenge_tickets"` + NodesOnWatchlist string `json:"nodes_on_watchlist"` +} + +// ChallengeTicket represents the ticket details for self-healing challenge +type ChallengeTicket struct { + TxID string `json:"tx_id"` + TicketType TicketType `json:"ticket_type"` + MissingKeys []string `json:"missing_keys"` + DataHash []byte `json:"data_hash"` + Recipient string `json:"recipient"` +} + +// RespondedTicket represents the details of ticket responded in a self-healing challenge +type RespondedTicket struct { + TxID string `json:"tx_id"` + 
TicketType TicketType `json:"ticket_type"` + MissingKeys []string `json:"missing_keys"` + ReconstructedFileHash []byte `json:"reconstructed_file_hash"` + IsReconstructionRequired bool `json:"is_reconstruction_required"` + Error string `json:"error"` +} + +// SelfHealingResponseData represents the response data for self-healing sent by the recipient +type SelfHealingResponseData struct { + ChallengeID string `json:"challenge_id"` + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + RespondedTicket RespondedTicket `json:"responded_ticket"` + Verifiers []string `json:"verifiers"` +} + +// VerifiedTicket represents the details of ticket verified in self-healing challenge +type VerifiedTicket struct { + TxID string `json:"tx_id"` + TicketType TicketType `json:"ticket_type"` + MissingKeys []string `json:"missing_keys"` + ReconstructedFileHash []byte `json:"reconstructed_file_hash"` + IsReconstructionRequired bool `json:"is_reconstruction_required"` + IsReconstructionRequiredByHealer bool `json:"is_reconstruction_required_by_healer"` + IsVerified bool `json:"is_verified"` + Message string `json:"message"` +} + +// SelfHealingVerificationData represents the verification data for self-healing challenge +type SelfHealingVerificationData struct { + NodeID string `json:"node_id"` + NodeAddress string `json:"node_address"` + ChallengeID string `json:"challenge_id"` + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + VerifiedTicket VerifiedTicket `json:"verified_ticket"` + VerifiersData map[string][]byte `json:"verifiers_data"` +} + +// SelfHealingGenerationMetric represents the self-healing generation metrics for trigger events +type SelfHealingGenerationMetric struct { + ID int `db:"id"` + TriggerID string `db:"trigger_id"` + MessageType int `db:"message_type"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` + SenderSignature []byte 
`db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// CombinedSelfHealingMetrics represents the combination of generation and execution metrics +type CombinedSelfHealingMetrics struct { + GenerationMetrics []SelfHealingGenerationMetric + ExecutionMetrics []SelfHealingExecutionMetric +} + +// SelfHealingExecutionMetric represents the self-healing execution metrics for trigger events +type SelfHealingExecutionMetric struct { + ID int `db:"id"` + TriggerID string `db:"trigger_id"` + ChallengeID string `db:"challenge_id"` + MessageType int `db:"message_type"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// SelfHealingMetricType represents the type of self-healing metric +type SelfHealingMetricType int + +const ( + // GenerationSelfHealingMetricType represents the generation metric for self-healing + GenerationSelfHealingMetricType SelfHealingMetricType = 1 + // ExecutionSelfHealingMetricType represents the execution metric for self-healing + ExecutionSelfHealingMetricType SelfHealingMetricType = 2 +) + +// ProcessBroadcastMetricsRequest represents the request for broadcasting metrics +type ProcessBroadcastMetricsRequest struct { + Data []byte `json:"data"` + Type SelfHealingMetricType `json:"type"` + SenderID string `json:"sender_id"` + SenderSignature []byte `json:"sender_signature"` +} + +// SelfHealingMetrics represents the self-healing metrics for each challenge +type SelfHealingMetrics struct { + ChallengeID string `db:"challenge_id"` + SentTicketsForSelfHealing int `db:"sent_tickets_for_self_healing"` + EstimatedMissingKeys int `db:"estimated_missing_keys"` + TicketsInProgress int `db:"tickets_in_progress"` + TicketsRequiredSelfHealing int `db:"tickets_required_self_healing"` + SuccessfullySelfHealedTickets int `db:"successfully_self_healed_tickets"` + 
SuccessfullyVerifiedTickets int `db:"successfully_verified_tickets"` +} + +// SelfHealingChallengeEvent represents the challenge event that needs to be healed. +type SelfHealingChallengeEvent struct { + ID int64 + TriggerID string + ChallengeID string + TicketID string + Data []byte + SenderID string + IsProcessed bool + ExecMetric SelfHealingExecutionMetric + CreatedAt time.Time + UpdatedAt time.Time +} + +// Hash returns the hash of the self-healing challenge reports +func (s SelfHealingReports) Hash() string { + data, _ := json.Marshal(s) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} diff --git a/pkg/types/storage_challenge.go b/pkg/types/storage_challenge.go new file mode 100644 index 00000000..2b0432e1 --- /dev/null +++ b/pkg/types/storage_challenge.go @@ -0,0 +1,249 @@ +package types + +import ( + "encoding/json" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +// MessageType represents the type of message +type MessageType int + +const ( + // ChallengeMessageType represents the challenge message + ChallengeMessageType MessageType = iota + 1 + // ResponseMessageType represents the response message + ResponseMessageType + // EvaluationMessageType represents the evaluation message + EvaluationMessageType + // AffirmationMessageType represents the affirmation message + AffirmationMessageType + //BroadcastMessageType represents the message that needs to be broadcast + BroadcastMessageType +) + +// String returns the message string +func (m MessageType) String() string { + switch m { + case ChallengeMessageType: + return "challenge" + case ResponseMessageType: + return "response" + case EvaluationMessageType: + return "evaluation" + case AffirmationMessageType: + return "affirmation" + default: + return "unknown" + } +} + +// MessageTypeFromString returns the message type from string +func MessageTypeFromString(str string) (MessageType, error) { + switch str { + case 
"challenge":
		return ChallengeMessageType, nil
	case "response":
		return ResponseMessageType, nil
	case "evaluation":
		return EvaluationMessageType, nil
	case "affirmation":
		return AffirmationMessageType, nil
	case "broadcast":
		// Fix: BroadcastMessageType is part of the enum but had no parse
		// case, so round-tripping a broadcast message type failed.
		return BroadcastMessageType, nil
	default:
		return 0, errors.New("invalid message type string")
	}
}

// StorageChallengeSignatures represents the signature struct for broadcasting
type StorageChallengeSignatures struct {
	Challenger map[string]string `json:"challenger,omitempty"`
	Recipient  map[string]string `json:"recipient,omitempty"`
	Obs        map[string]string `json:"obs,omitempty"`
}

// Message represents the storage challenge message
type Message struct {
	MessageType     MessageType `json:"message_type"`
	ChallengeID     string      `json:"challenge_id"`
	Data            MessageData `json:"data"`
	Sender          string      `json:"sender"`
	SenderSignature []byte      `json:"sender_signature"`
	CreatedAt       time.Time   `json:"created_at"`
	UpdatedAt       time.Time   `json:"updated_at"`
}

// BroadcastMessage represents the storage challenge message that needs to be broadcast after evaluation
type BroadcastMessage struct {
	ChallengeID string
	Challenger  map[string][]byte
	Recipient   map[string][]byte
	Observers   map[string][]byte
}

// MessageDataList is a list of storage challenge message payloads.
type MessageDataList []MessageData

// MessageData represents the storage challenge message data
type MessageData struct {
	ChallengerID         string                 `json:"challenger_id"`
	Challenge            ChallengeData          `json:"challenge"`
	Observers            []string               `json:"observers"`
	RecipientID          string                 `json:"recipient_id"`
	Response             ResponseData           `json:"response"`
	ChallengerEvaluation EvaluationData         `json:"challenger_evaluation"`
	ObserverEvaluation   ObserverEvaluationData `json:"observer_evaluation"`
}

// ChallengeData represents the data of challenge
type ChallengeData struct {
	Block      int32     `json:"block"`
	Merkelroot string    `json:"merkelroot"`
	Timestamp  time.Time `json:"timestamp"`
	FileHash   string    `json:"file_hash"`
	StartIndex int       `json:"start_index"`
	EndIndex   int       `json:"end_index"`
}

// ResponseData represents the data of response
type ResponseData struct {
	Block      int32     `json:"block"`
	Merkelroot string    `json:"merkelroot"`
	Hash       string    `json:"hash"`
	Timestamp  time.Time `json:"timestamp"`
}

// EvaluationData represents the data of evaluation
type EvaluationData struct {
	Block      int32     `json:"block"`
	Merkelroot string    `json:"merkelroot"`
	Timestamp  time.Time `json:"timestamp"`
	Hash       string    `json:"hash"`
	IsVerified bool      `json:"is_verified"`
}

// ObserverEvaluationData represents the data of Observer's evaluation
type ObserverEvaluationData struct {
	Block                   int32     `json:"block"`
	Merkelroot              string    `json:"merkelroot"`
	IsChallengeTimestampOK  bool      `json:"is_challenge_timestamp_ok"`
	IsProcessTimestampOK    bool      `json:"is_process_timestamp_ok"`
	IsEvaluationTimestampOK bool      `json:"is_evaluation_timestamp_ok"`
	IsRecipientSignatureOK  bool      `json:"is_recipient_signature_ok"`
	IsChallengerSignatureOK bool      `json:"is_challenger_signature_ok"`
	IsEvaluationResultOK    bool      `json:"is_evaluation_result_ok"`
	Reason                  string    `json:"reason"`
	TrueHash                string    `json:"true_hash"`
	Timestamp               time.Time `json:"timestamp"`
}

// StorageChallengeLogMessage represents the message log to be stored in the DB
type StorageChallengeLogMessage struct {
	ID              int       `db:"id"`
	MessageType     int       `db:"message_type"`
	ChallengeID     string    `db:"challenge_id"`
	Data            []byte    `db:"data"`
	Sender          string    `db:"sender_id"`
	SenderSignature []byte    `db:"sender_signature"`
	CreatedAt       time.Time `db:"created_at"`
	UpdatedAt       time.Time `db:"updated_at"`
}

// StorageChallengeMetric represents the metric log to be stored in the DB
type StorageChallengeMetric struct {
	ID          int    `db:"id"`
	MessageType int    `db:"message_type"`
	ChallengeID string `db:"challenge_id"`
	Data        []byte `db:"data"`
	SenderID    string `db:"sender_id"`
}

// BroadcastLogMessage represents the broadcast message log to be stored in the DB
type BroadcastLogMessage struct {
	ChallengeID string `db:"challenge_id"`
	Challenger  string `db:"challenger"`
	Recipient   string `db:"recipient"`
	Observers   string `db:"observers"`
	Data        []byte `db:"data"`
}

// BroadcastMessageMetrics is the struct for broadcast message metrics
type BroadcastMessageMetrics struct {
	ID          int       `db:"id"`
	ChallengeID string    `db:"challenge_id"`
	Challenger  string    `db:"challenger"`
	Recipient   string    `db:"recipient"`
	Observers   string    `db:"observers"`
	Data        []byte    `db:"data"`
	CreatedAt   time.Time `db:"created_at"`
	UpdatedAt   time.Time `db:"updated_at"`
}

// ProcessBroadcastChallengeMetricsRequest represents the request for broadcasting metrics
type ProcessBroadcastChallengeMetricsRequest struct {
	Data     []byte `json:"data"`
	SenderID string `json:"sender_id"`
}

// StorageChallengeMessages is a list of storage challenge messages.
type StorageChallengeMessages []Message

// Hash returns the hash of the storage-challenge challenge log data.
// NOTE(review): Marshal/Sha3256hash errors are deliberately ignored and the
// raw digest bytes are returned as a string (not hex-encoded).
func (mdl StorageChallengeMessages) Hash() string {
	data, _ := json.Marshal(mdl)
	hash, _ := utils.Sha3256hash(data)

	return string(hash)
}

// NScMetric gets the latest challenge IDs from the DB
type NScMetric struct {
	Count       int
	ChallengeID string
	CreatedAt   string
}

// NHcMetric gets the latest health-check challenge IDs from the DB
type NHcMetric struct {
	Count       int
	ChallengeID string
	CreatedAt   string
}

// AccumulativeChallengeData carries per-node accumulated challenge counters.
type AccumulativeChallengeData struct {
	NodeID                       string    `db:"node_id"`
	IPAddress                    string    `db:"ip_address"`
	TotalChallengesAsRecipients  int       `db:"total_challenges_as_recipients"`
	TotalChallengesAsObservers   int       `db:"total_challenges_as_observers"`
	TotalChallengesAsChallengers int       `db:"total_challenges_as_challengers"`
	CorrectChallengerEvaluations int       `db:"correct_challenger_evaluations"`
	CorrectObserverEvaluations   int       `db:"correct_observer_evaluations"`
	CorrectRecipientEvaluations  int       `db:"correct_recipient_evaluations"`
	CreatedAt                    time.Time `db:"created_at"`
	UpdatedAt                    time.Time `db:"updated_at"`
}

// AggregatedScore represents the structure of
data in the aggregated_challenge_scores table
type AggregatedScore struct {
	NodeID                    string
	IPAddress                 string
	StorageChallengeScore     float64
	HealthCheckChallengeScore float64
	CreatedAt                 time.Time
	UpdatedAt                 time.Time
}

// AggregatedScoreList is a list of aggregated challenge scores.
type AggregatedScoreList []AggregatedScore

// Hash returns a digest of the JSON-encoded score list.
// NOTE(review): Marshal/Sha3256hash errors are deliberately ignored and the
// raw digest bytes are returned as a string (not hex-encoded).
func (asl AggregatedScoreList) Hash() string {
	data, _ := json.Marshal(asl)
	hash, _ := utils.Sha3256hash(data)

	return string(hash)
}

// ScoreAggregationEvent is one queued challenge awaiting score aggregation.
type ScoreAggregationEvent struct {
	ChallengeID  string    `db:"challenge_id"`
	IsAggregated bool      `db:"is_aggregated"`
	CreatedAt    time.Time `db:"created_at"`
	UpdatedAt    time.Time `db:"updated_at"`
}
diff --git a/pkg/types/ticket.go b/pkg/types/ticket.go
new file mode 100644
index 00000000..9698e0d1
--- /dev/null
+++ b/pkg/types/ticket.go
@@ -0,0 +1,89 @@
package types

import (
	"time"
)

// File describes one (possibly multi-volume) cascade file registration record.
type File struct {
	FileID                       string
	UploadTimestamp              time.Time
	Path                         string
	FileIndex                    string
	BaseFileID                   string
	TaskID                       string
	RegTxid                      string
	ActivationTxid               string
	ReqBurnTxnAmount             float64
	BurnTxnID                    string
	ReqAmount                    float64
	IsConcluded                  bool
	CascadeMetadataTicketID      string
	UUIDKey                      string
	HashOfOriginalBigFile        string
	NameOfOriginalBigFileWithExt string
	SizeOfOriginalBigFile        float64
	DataTypeOfOriginalBigFile    string
	StartBlock                   int32
	DoneBlock                    int
	PastelID                     string
	Passphrase                   string
}

// Files is a list of file registration records.
type Files []*File

// Names returns the FileID of each file, in order.
// NOTE(review): despite the name, these are FileIDs, not original file names.
func (f Files) Names() []string {
	names := make([]string, 0, len(f))
	for _, file := range f {
		names = append(names, file.FileID)
	}
	return names
}

// RegistrationAttempt records one attempt to register a file on-chain.
type RegistrationAttempt struct {
	ID           int
	FileID       string
	BaseFileID   string
	RegStartedAt time.Time
	ProcessorSNS string
	FinishedAt   time.Time
	IsSuccessful bool
	IsConfirmed  bool
	ErrorMessage string
}

// ActivationAttempt records one attempt to activate a registered file.
type ActivationAttempt struct {
	ID                  int
	FileID              string
	BaseFileID          string
	ActivationAttemptAt time.Time
	IsSuccessful        bool
	IsConfirmed         bool
	ErrorMessage string
}

// GetUnconcludedFiles returns the files whose IsConcluded flag is unset.
// NOTE(review): the error result is always nil here; presumably kept for
// interface symmetry — confirm with callers.
func (fs Files) GetUnconcludedFiles() (Files, error) {
	var unconcludedFiles Files
	for _, f := range fs {
		if !f.IsConcluded {
			unconcludedFiles = append(unconcludedFiles, f)
		}
	}

	return unconcludedFiles, nil
}

// GetBase returns the volume with FileIndex "0", or nil if none exists.
func (fs Files) GetBase() *File {
	for _, f := range fs {
		if f.FileIndex == "0" {
			return f
		}
	}

	return nil
}

// MultiVolCascadeTicketTxIDMap links a multi-volume cascade ticket txid to its base file.
type MultiVolCascadeTicketTxIDMap struct {
	ID                        int64
	MultiVolCascadeTicketTxid string
	BaseFileID                string
}
diff --git a/pkg/types/types.go b/pkg/types/types.go
new file mode 100644
index 00000000..030db278
--- /dev/null
+++ b/pkg/types/types.go
@@ -0,0 +1,120 @@
package types

import (
	"time"

	"github.com/LumeraProtocol/supernode/pkg/log"

	json "github.com/json-iterator/go"
)

// MeshedSuperNode represents meshed sn
type MeshedSuperNode struct {
	SessID string
	NodeID string
}

// TaskHistory represents task history
type TaskHistory struct {
	ID        int
	TaskID    string
	CreatedAt time.Time
	Status    string
	Details   *Details
}

// StorageChallengeStatus represents possible storage challenge statuses
type StorageChallengeStatus int

const (
	//UndefinedStorageChallengeStatus represents invalid storage challenge type
	UndefinedStorageChallengeStatus StorageChallengeStatus = iota
	//GeneratedStorageChallengeStatus represents when the challenge is stored after generation
	GeneratedStorageChallengeStatus
	//ProcessedStorageChallengeStatus represents when the challenge is stored after processing
	ProcessedStorageChallengeStatus
	//VerifiedStorageChallengeStatus represents when the challenge is stored after verification
	VerifiedStorageChallengeStatus
)

// StorageChallenge represents storage challenge log
type StorageChallenge struct {
	ID              int64
	ChallengeID     string
	FileHash        string
	ChallengingNode string
	RespondingNode  string
	VerifyingNodes  string
	GeneratedHash   string
	Status          StorageChallengeStatus
	StartingIndex   int
	EndingIndex     int
	CreatedAt       time.Time
	UpdatedAt       time.Time
}

// SelfHealingStatus represents possible self-healing statuses of
failed challenge +type SelfHealingStatus string + +const ( + //UndefinedSelfHealingStatus represents invalid status for self-healing operation + UndefinedSelfHealingStatus SelfHealingStatus = "Undefined" + //CreatedSelfHealingStatus represents when the failed challenge gets stored in DB + CreatedSelfHealingStatus SelfHealingStatus = "Created" + //InProgressSelfHealingStatus represents when the challenge is retrieved for self-healing + InProgressSelfHealingStatus SelfHealingStatus = "InProgress" + //FailedSelfHealingStatus represents when the reconstruction has been completed + FailedSelfHealingStatus SelfHealingStatus = "Failed" + //CompletedSelfHealingStatus represents when the reconstruction has been completed + CompletedSelfHealingStatus SelfHealingStatus = "Completed" + //ReconstructionNotRequiredSelfHealingStatus represents when the reconstruction has been completed + ReconstructionNotRequiredSelfHealingStatus SelfHealingStatus = "ReconstructionNotRequired" +) + +// SelfHealingChallenge represents self-healing challenge +type SelfHealingChallenge struct { + ID int64 + ChallengeID string + MerkleRoot string + FileHash string + ChallengingNode string + RespondingNode string + VerifyingNode string + ReconstructedFileHash []byte + Status SelfHealingStatus + CreatedAt time.Time + UpdatedAt time.Time +} + +// Fields represents status log +type Fields map[string]interface{} + +// Details represents status log details with additional fields +type Details struct { + Message string + Fields Fields +} + +// Stringify convert the Details' struct to stringify json +func (d *Details) Stringify() string { + details, err := json.Marshal(&d) + if err != nil { + log.WithError(err).Error("unable to marshal task history details") + return "" + } + + return string(details) +} + +// NewDetails initializes and return the valid detail object +func NewDetails(msg string, fields Fields) *Details { + return &Details{ + Message: msg, + Fields: fields, + } +} + +// IsValid checks if the 
status log map is not empty +func (f Fields) IsValid() bool { + return len(f) != 0 +} diff --git a/proto/proto.go b/proto/proto.go new file mode 100644 index 00000000..34045007 --- /dev/null +++ b/proto/proto.go @@ -0,0 +1,6 @@ +package proto + +const ( + // MetadataKeySessID is unique numeric for every registration process, encompasses for all connections. + MetadataKeySessID = "sessID" +) diff --git a/proto/supernode/action/.gitkeep b/proto/supernode/action/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/proto/supernode/action/cascade/service.proto b/proto/supernode/action/cascade/service.proto new file mode 100644 index 00000000..d204180d --- /dev/null +++ b/proto/supernode/action/cascade/service.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package cascade; + +option go_package = "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade"; + +service CascadeService { + rpc Session(stream SessionRequest) returns (stream SessionReply); + rpc UploadInputData (UploadInputDataRequest) returns (UploadInputDataResponse); +} + +message UploadInputDataRequest { + string filename = 1; + string action_id = 2; + string data_hash = 3; + int32 rq_max = 4; + string signed_data = 5; + bytes data = 6; +} + +message UploadInputDataResponse { + bool success = 1; + string message = 2; +} + +message SessionRequest { + bool is_primary = 1; +} + +message SessionReply { + string sessID = 1; +} diff --git a/supernode/cmd/service.go b/supernode/cmd/service.go new file mode 100644 index 00000000..3363e2fb --- /dev/null +++ b/supernode/cmd/service.go @@ -0,0 +1,33 @@ +package cmd + +import ( + "context" + "reflect" + + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/log" +) + +type service interface { + Run(context.Context) error +} + +func RunServices(ctx context.Context, services ...service) error { + group, ctx := errgroup.WithContext(ctx) + + for _, service := range services { + service := service + + 
group.Go(func() error { + err := service.Run(ctx) + if err != nil { + log.WithContext(ctx).WithError(err).Errorf("service %s stopped", reflect.TypeOf(service)) + } else { + log.WithContext(ctx).Warnf("service %s stopped", reflect.TypeOf(service)) + } + return err + }) + } + + return group.Wait() +} diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 1f372c25..7be4f29e 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -2,13 +2,26 @@ package cmd import ( "context" + "fmt" "log/slog" "os" "os/signal" "syscall" + "github.com/LumeraProtocol/supernode/p2p" + "github.com/LumeraProtocol/supernode/p2p/kademlia/store/cloud.go" + "github.com/LumeraProtocol/supernode/p2p/kademlia/store/sqlite" "github.com/LumeraProtocol/supernode/pkg/keyring" "github.com/LumeraProtocol/supernode/pkg/logtrace" + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/supernode/config" + "github.com/LumeraProtocol/supernode/supernode/node/supernode/server" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" + "github.com/LumeraProtocol/supernode/supernode/services/common" + + cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/spf13/cobra" ) @@ -45,8 +58,26 @@ The supernode will connect to the Lumera network and begin participating in the return err } + // Initialize Lumera client + lumeraClient, err := initLumeraClient(ctx, appConfig) + if err != nil { + return fmt.Errorf("failed to initialize Lumera client: %w", err) + } + + // Initialize RaptorQ store for Cascade processing + rqStore, err := initRQStore(ctx, appConfig) + if err != nil { + return fmt.Errorf("failed to initialize RaptorQ store: %w", err) + } + + // Initialize P2P service + p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil) + if err != nil { + return fmt.Errorf("failed to initialize P2P 
service: %w", err) + } + // Initialize the supernode (next step) - supernode, err := NewSupernode(ctx, appConfig, kr) + _, err = NewSupernode(ctx, appConfig, kr, p2pService, rqStore, lumeraClient) if err != nil { logtrace.Error(ctx, "Failed to initialize supernode", logtrace.Fields{ "error": err.Error(), @@ -54,14 +85,46 @@ The supernode will connect to the Lumera network and begin participating in the return err } - // Start the supernode - if err := supernode.Start(ctx); err != nil { - logtrace.Error(ctx, "Failed to start supernode", logtrace.Fields{ + // Initialize RaptorQ client connection + raptorQClientConnection, err := raptorq.NewClient().Connect(ctx, appConfig.RaptorQConfig.ServiceAddress) + if err != nil { + logtrace.Error(ctx, "Failed to initialize raptor q client connection interface", logtrace.Fields{ "error": err.Error(), }) return err } + // Configure cascade service + cascadeService := cascade.NewCascadeService( + &cascade.Config{ + Config: common.Config{ + SupernodeAccountAddress: appConfig.SupernodeConfig.KeyName, + }, + RaptorQServicePort: fmt.Sprintf("%d", appConfig.RaptorQConfig.ServicePort), + RaptorQServiceAddress: appConfig.RaptorQConfig.ServiceAddress, + RqFilesDir: appConfig.RaptorQConfig.FilesDir, + NumberConnectedNodes: 1, + }, + lumeraClient, + nil, + *p2pService, + raptorQClientConnection.RaptorQ(raptorq.NewConfig(), lumeraClient, rqStore), + raptorq.NewClient(), + rqStore, + ) + + serverConfig := &server.Config{ + ListenAddresses: appConfig.SupernodeConfig.IpAddress, // FIXME : confirm + Port: int(appConfig.SupernodeConfig.Port), // FIXME : confirm + } + grpc := server.New(serverConfig, + "service", + cascadeService, + ) + + // Start the services + RunServices(ctx, grpc, cascadeService, *p2pService) + // Set up signal handling for graceful shutdown sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) @@ -79,3 +142,40 @@ The supernode will connect to the Lumera network and begin participating in the 
func init() { rootCmd.AddCommand(startCmd) } + +// initP2PService initializes the P2P service +func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (*p2p.P2P, error) { + // Get the supernode address from the keyring + keyInfo, err := kr.Key(config.SupernodeConfig.KeyName) + if err != nil { + return nil, fmt.Errorf("key not found: %w", err) + } + address, err := keyInfo.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get address from key: %w", err) + } + + // Initialize P2P service + p2pConfig := &p2p.Config{ + ListenAddress: config.P2PConfig.ListenAddress, + Port: config.P2PConfig.Port, + DataDir: config.P2PConfig.DataDir, + BootstrapNodes: config.P2PConfig.BootstrapNodes, + ExternalIP: config.P2PConfig.ExternalIP, + ID: address.String(), + } + + logtrace.Info(ctx, "Initializing P2P service", logtrace.Fields{ + "listen_address": p2pConfig.ListenAddress, + "port": p2pConfig.Port, + "data_dir": p2pConfig.DataDir, + "supernode_id": address.String(), + }) + + p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst) + if err != nil { + return nil, fmt.Errorf("failed to initialize p2p service: %w", err) + } + + return &p2pService, nil +} diff --git a/supernode/cmd/supernode.go b/supernode/cmd/supernode.go index 28b0f91f..1eb85aac 100644 --- a/supernode/cmd/supernode.go +++ b/supernode/cmd/supernode.go @@ -25,29 +25,19 @@ type Supernode struct { } // NewSupernode creates a new supernode instance -func NewSupernode(ctx context.Context, config *config.Config, kr keyring.Keyring) (*Supernode, error) { +func NewSupernode(ctx context.Context, config *config.Config, kr keyring.Keyring, + p2pClient *p2p.P2P, rqStore rqstore.Store, lumeraClient lumera.Client) (*Supernode, error) { + if config == nil { return nil, fmt.Errorf("config is nil") } - // Initialize Lumera client - lumeraClient, err := 
initLumeraClient(ctx, config) - if err != nil { - return nil, fmt.Errorf("failed to initialize Lumera client: %w", err) - } - - // Initialize RaptorQ store for Cascade processing - rqStore, err := initRQStore(ctx, config) - if err != nil { - return nil, fmt.Errorf("failed to initialize RaptorQ store: %w", err) - } - - // Create the supernode instance supernode := &Supernode{ config: config, lumeraClient: lumeraClient, keyring: kr, rqStore: rqStore, + p2pService: *p2pClient, keyName: config.SupernodeConfig.KeyName, } diff --git a/supernode/config.yml b/supernode/config.yml index d9dc433a..802200a5 100644 --- a/supernode/config.yml +++ b/supernode/config.yml @@ -23,4 +23,10 @@ p2p: lumera: grpc_addr: "localhost:9090" chain_id: "lumera" - timeout: 10 # Connection timeout in seconds \ No newline at end of file + timeout: 10 # Connection timeout in seconds + +# RaptorQ Configuration +raptorq: + service_address: "0.0.0.0" + service_port: 1234 + files_dir: "~/.supernode/raptorq_files" diff --git a/supernode/config/config.go b/supernode/config/config.go index 81c01848..c34eb1b6 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -37,11 +37,18 @@ type LumeraClientConfig struct { Timeout int `yaml:"timeout"` } +type RaptorQConfig struct { + ServiceAddress string `yaml:"service_address"` + ServicePort uint16 `yaml:"service_port"` + FilesDir string `yaml:"files_dir"` +} + type Config struct { SupernodeConfig `yaml:"supernode"` KeyringConfig `yaml:"keyring"` P2PConfig `yaml:"p2p"` LumeraClientConfig `yaml:"lumera"` + RaptorQConfig `yaml:"raptorq"` } // LoadConfig loads the configuration from a file @@ -102,6 +109,14 @@ func LoadConfig(filename string) (*Config, error) { } } + // Process RaptorQConfig + if config.RaptorQConfig.FilesDir != "" { + config.RaptorQConfig.FilesDir = expandPath(config.RaptorQConfig.FilesDir, homeDir) + if err := os.MkdirAll(config.RaptorQConfig.FilesDir, 0700); err != nil { + return nil, fmt.Errorf("failed to create RaptorQ 
files directory: %w", err) + } + } + logtrace.Info(ctx, "Configuration loaded successfully", logtrace.Fields{}) return &config, nil } diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go new file mode 100644 index 00000000..751cec60 --- /dev/null +++ b/supernode/node/action/server/cascade/cascade_action_server.go @@ -0,0 +1,20 @@ +package cascade + +import ( + cascadeGen "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/supernode/node/common" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" +) + +type CascadeActionServer struct { + cascadeGen.UnimplementedCascadeServiceServer + + *common.RegisterCascade +} + +// NewCascadeActionServer returns a new CascadeActionServer instance. +func NewCascadeActionServer(service *cascade.CascadeService) *CascadeActionServer { + return &CascadeActionServer{ + RegisterCascade: common.NewRegisterCascade(service), + } +} diff --git a/supernode/node/action/server/cascade/session.go b/supernode/node/action/server/cascade/session.go new file mode 100644 index 00000000..496913bb --- /dev/null +++ b/supernode/node/action/server/cascade/session.go @@ -0,0 +1,73 @@ +package cascade + +import ( + "context" + "io" + + pb "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +// Session implements CascadeActionServer RegisterCascadeServer.Session() +func (service *CascadeActionServer) Session(stream pb.CascadeService_SessionServer) error { + ctx, cancel := context.WithCancel(stream.Context()) + defer cancel() + + var task *cascade.CascadeRegistrationTask + + if sessID, ok := service.SessID(ctx); ok { + if 
task = service.Task(sessID); task == nil { + return errors.Errorf("not found %q task", sessID) + } + } else { + task = service.NewCascadeRegistrationTask() + } + go func() { + <-task.Done() + cancel() + }() + defer task.Cancel() + + peer, _ := peer.FromContext(ctx) + + defer log.WithContext(ctx).WithField("addr", peer.Addr).Debug("Session stream closed") + + req, err := stream.Recv() + if err != nil { + return errors.Errorf("receieve handshake request: %w", err) + } + + if err := task.NetworkHandler.Session(ctx, req.IsPrimary); err != nil { + return err + } + + resp := &pb.SessionReply{ + SessID: task.ID(), + } + + if err := stream.Send(resp); err != nil { + return errors.Errorf("send handshake response: %w", err) + } + log.WithContext(ctx).WithField("resp", resp).Debug("Session response") + + for { + if _, err := stream.Recv(); err != nil { + if err == io.EOF { + return nil + } + switch status.Code(err) { + case codes.Canceled: + log.WithContext(ctx).WithError(err).Error("handshake stream canceled") + return nil + case codes.Unavailable: + return nil + } + return errors.Errorf("handshake stream closed: %w", err) + } + } +} diff --git a/supernode/node/action/server/cascade/upload_cascade_action_input.go b/supernode/node/action/server/cascade/upload_cascade_action_input.go new file mode 100644 index 00000000..67a81e1e --- /dev/null +++ b/supernode/node/action/server/cascade/upload_cascade_action_input.go @@ -0,0 +1,40 @@ +package cascade + +import ( + "context" + "fmt" + + pb "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + cascadeService "github.com/LumeraProtocol/supernode/supernode/services/cascade" +) + +func (s *CascadeActionServer) UploadInputData(ctx context.Context, req *pb.UploadInputDataRequest) (*pb.UploadInputDataResponse, error) { + fields := logtrace.Fields{ + logtrace.FieldMethod: "UploadInputData", + logtrace.FieldModule: "CascadeActionServer", + logtrace.FieldRequest: req, + } 
+ logtrace.Info(ctx, "request to upload cascade input data received", fields) + + task, err := s.TaskFromMD(ctx) + if err != nil { + return nil, err + } + + res, err := task.UploadInputData(ctx, &cascadeService.UploadInputDataRequest{ + Filename: req.Filename, + ActionID: req.ActionId, + DataHash: req.DataHash, + RqMax: req.RqMax, + SignedData: req.SignedData, + Data: req.Data, + }) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to upload input data", fields) + return &pb.UploadInputDataResponse{}, fmt.Errorf("cascade services upload input data error: %w", err) + } + + return &pb.UploadInputDataResponse{Success: res.Success, Message: res.Message}, nil +} diff --git a/supernode/node/common/register_cascade.go b/supernode/node/common/register_cascade.go new file mode 100644 index 00000000..9f4ef70f --- /dev/null +++ b/supernode/node/common/register_cascade.go @@ -0,0 +1,51 @@ +package common + +import ( + "context" + + "google.golang.org/grpc/metadata" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/proto" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" +) + +// RegisterCascade represents common grpc services for registration sense. +type RegisterCascade struct { + *cascade.CascadeService +} + +// SessID retrieves SessID from the metadata. +func (service *RegisterCascade) SessID(ctx context.Context) (string, bool) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", false + } + + mdVals := md.Get(proto.MetadataKeySessID) + if len(mdVals) == 0 { + return "", false + } + return mdVals[0], true +} + +// TaskFromMD returns task by SessID from the metadata. 
+func (service *RegisterCascade) TaskFromMD(ctx context.Context) (*cascade.CascadeRegistrationTask, error) { + sessID, ok := service.SessID(ctx) + if !ok { + return nil, errors.New("not found sessID in metadata") + } + + task := service.Task(sessID) + if task == nil { + return nil, errors.Errorf("not found %q task", sessID) + } + return task, nil +} + +// NewRegisterCascade returns a new RegisterSense instance. +func NewRegisterCascade(service *cascade.CascadeService) *RegisterCascade { + return &RegisterCascade{ + CascadeService: service, + } +} diff --git a/supernode/node/supernode/client/client.go b/supernode/node/supernode/client/client.go new file mode 100644 index 00000000..3c1d6a30 --- /dev/null +++ b/supernode/node/supernode/client/client.go @@ -0,0 +1,50 @@ +package client + +import ( + "context" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + netgrpcclient "github.com/LumeraProtocol/supernode/pkg/net/grpc/client" + "github.com/LumeraProtocol/supernode/pkg/random" + node "github.com/LumeraProtocol/supernode/supernode/node/supernode" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + _ "google.golang.org/grpc/keepalive" +) + +// this implements SN's GRPC methods that call another SN during Cascade Registration +// meaning - these methods implements client side of SN to SN GRPC communication + +type Client struct { + *netgrpcclient.Client + KeyRing keyring.Keyring + SuperNodeAccAddress string +} + +// Connect implements node.Client.Connect() +func (c *Client) Connect(ctx context.Context, address string) (node.ConnectionInterface, error) { + clientOptions := netgrpcclient.DefaultClientOptions() + clientOptions.ConnWaitTime = 30 * time.Minute + clientOptions.MinConnectTimeout = 30 * time.Minute + clientOptions.EnableRetries = false + + id, _ := random.String(8, random.Base62Chars) + + grpcConn, err := c.Client.Connect(ctx, address, clientOptions) + if err != nil { + 
log.WithContext(ctx).WithError(err).Error("DialContext err") + return nil, errors.Errorf("dial address %s: %w", address, err) + } + + log.WithContext(ctx).Debugf("Connected to %s", address) + + conn := newClientConn(id, grpcConn) + + go func() { + //<-conn.Done() + log.WithContext(ctx).Debugf("Disconnected %s", grpcConn.Target()) + }() + + return conn, nil +} diff --git a/supernode/node/supernode/client/connection.go b/supernode/node/supernode/client/connection.go new file mode 100644 index 00000000..59c7d631 --- /dev/null +++ b/supernode/node/supernode/client/connection.go @@ -0,0 +1,21 @@ +package client + +import ( + "google.golang.org/grpc" + + "github.com/LumeraProtocol/supernode/supernode/node/supernode" +) + +// clientConn represents grpc client connection. +type clientConn struct { + *grpc.ClientConn + + id string +} + +func newClientConn(id string, conn *grpc.ClientConn) supernode.ConnectionInterface { + return &clientConn{ + ClientConn: conn, + id: id, + } +} diff --git a/supernode/node/supernode/client/session.go b/supernode/node/supernode/client/session.go new file mode 100644 index 00000000..d325e7d6 --- /dev/null +++ b/supernode/node/supernode/client/session.go @@ -0,0 +1,13 @@ +package client + +import ( + "context" + + "github.com/LumeraProtocol/supernode/proto" + "google.golang.org/grpc/metadata" +) + +func ContextWithMDSessID(ctx context.Context, sessID string) context.Context { + md := metadata.Pairs(proto.MetadataKeySessID, sessID) + return metadata.NewOutgoingContext(ctx, md) +} diff --git a/supernode/node/supernode/node_client_interface.go b/supernode/node/supernode/node_client_interface.go new file mode 100644 index 00000000..019b56bc --- /dev/null +++ b/supernode/node/supernode/node_client_interface.go @@ -0,0 +1,32 @@ +package supernode + +import ( + "context" +) + +// ClientInterface represents a base connection interface. +type ClientInterface interface { + // Connect connects to the server at the given address. 
+ Connect(ctx context.Context, address string) (ConnectionInterface, error) +} + +// ConnectionInterface represents a client connection +type ConnectionInterface interface { + // Close closes connection. + Close() error +} + +// SuperNodePeerAPIInterface base interface for other Node API interfaces +type SuperNodePeerAPIInterface interface { + // SessID returns the taskID received from the server during the handshake. + SessID() (taskID string) + // Session sets up an initial connection with primary supernode, by telling sessID and its own nodeID. + Session(ctx context.Context, nodeID, sessID string) (err error) +} + +// revive:disable:exported + +// NodeMaker interface to make concrete node types +type NodeMaker interface { + MakeNode(conn ConnectionInterface) SuperNodePeerAPIInterface +} diff --git a/supernode/node/supernode/server/common/register_cascade.go b/supernode/node/supernode/server/common/register_cascade.go new file mode 100644 index 00000000..08cdc0ad --- /dev/null +++ b/supernode/node/supernode/server/common/register_cascade.go @@ -0,0 +1,56 @@ +package common + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/proto" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" +) + +// RegisterCascade represents common grpc services for registration sense. +type RegisterCascade struct { + *cascade.CascadeService +} + +// SessID retrieves SessID from the metadata. +func (service *RegisterCascade) SessID(ctx context.Context) (string, bool) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", false + } + + mdVals := md.Get(proto.MetadataKeySessID) + if len(mdVals) == 0 { + return "", false + } + return mdVals[0], true +} + +// TaskFromMD returns task by SessID from the metadata. 
+func (service *RegisterCascade) TaskFromMD(ctx context.Context) (*cascade.CascadeRegistrationTask, error) { + sessID, ok := service.SessID(ctx) + if !ok { + return nil, errors.New("not found sessID in metadata") + } + + task := service.Task(sessID) + if task == nil { + return nil, errors.Errorf("not found %q task", sessID) + } + return task, nil +} + +func (service *RegisterCascade) Desc() *grpc.ServiceDesc { + return &grpc.ServiceDesc{ServiceName: "supernode.RegisterCascade", HandlerType: (*RegisterCascade)(nil)} +} + +// NewRegisterCascade returns a new RegisterSense instance. +func NewRegisterCascade(service *cascade.CascadeService) *RegisterCascade { + return &RegisterCascade{ + CascadeService: service, + } +} diff --git a/supernode/node/supernode/server/config.go b/supernode/node/supernode/server/config.go new file mode 100644 index 00000000..722857e0 --- /dev/null +++ b/supernode/node/supernode/server/config.go @@ -0,0 +1,20 @@ +package server + +const ( + defaultListenAddresses = "0.0.0.0" + defaultPort = 4444 +) + +// Config contains settings of the supernode server. +type Config struct { + ListenAddresses string `mapstructure:"listen_addresses" json:"listen_addresses,omitempty"` + Port int `mapstructure:"port" json:"port,omitempty"` +} + +// NewConfig returns a new Config instance. 
+func NewConfig() *Config { + return &Config{ + ListenAddresses: defaultListenAddresses, + Port: defaultPort, + } +} diff --git a/supernode/node/supernode/server/server.go b/supernode/node/supernode/server/server.go new file mode 100644 index 00000000..4ea26161 --- /dev/null +++ b/supernode/node/supernode/server/server.go @@ -0,0 +1,141 @@ +package server + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" +) + +type service interface { + Desc() *grpc.ServiceDesc +} + +// Server represents supernode server +type Server struct { + config *Config + services []service + name string + //secClient alts.SecClient + //secInfo *alts.SecInfo +} + +// Run starts the server +func (server *Server) Run(ctx context.Context) error { + grpclog.SetLoggerV2(log.NewLoggerWithErrorLevel()) + ctx = log.ContextWithPrefix(ctx, server.name) + + group, ctx := errgroup.WithContext(ctx) + + addresses := strings.Split(server.config.ListenAddresses, ",") + grpcServer := server.grpcServer(ctx) + if grpcServer == nil { + return fmt.Errorf("initialize grpc server failed") + } + + for _, address := range addresses { + addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.config.Port)) + + group.Go(func() error { + return server.listen(ctx, addr, grpcServer) + }) + } + + return group.Wait() +} + +func (server *Server) listen(ctx context.Context, address string, grpcServer *grpc.Server) (err error) { + listen, err := net.Listen("tcp", address) + if err != nil { + return errors.Errorf("listen: %w", err).WithField("address", address) + } + + // The listener that will track connections. 
+ /*listen = &connTrackListener{ + Listener: listen, + connTrack: server.connTrack, // connection tracker + }*/ + + errCh := make(chan error, 1) + go func() { + defer errors.Recover(func(recErr error) { err = recErr }) + log.WithContext(ctx).Infof("gRPC server listening on %q", address) + if err := grpcServer.Serve(listen); err != nil { + errCh <- errors.Errorf("serve: %w", err).WithField("address", address) + } + }() + + select { + case <-ctx.Done(): + log.WithContext(ctx).Infof("Shutting down gRPC server at %q", address) + grpcServer.GracefulStop() + case err := <-errCh: + return err + } + + return nil +} + +func (server *Server) grpcServer(ctx context.Context) *grpc.Server { + //if server.secClient == nil || server.secInfo == nil { + // log.WithContext(ctx).Errorln("secClient or secInfo don't initialize") + // return nil + //} + + //// Define the keep-alive parameters + //kaParams := keepalive.ServerParameters{ + // MaxConnectionIdle: 2 * time.Hour, + // MaxConnectionAge: 2 * time.Hour, + // MaxConnectionAgeGrace: 1 * time.Hour, + // Time: 1 * time.Hour, + // Timeout: 30 * time.Minute, + //} + // + //// Define the keep-alive enforcement policy + //kaPolicy := keepalive.EnforcementPolicy{ + // MinTime: 3 * time.Minute, // Minimum time a client should wait before sending keep-alive probes + // PermitWithoutStream: true, // Only allow pings when there are active streams + //} + + var grpcServer *grpc.Server + //if os.Getenv("INTEGRATION_TEST_ENV") == "true" { + // grpcServer = grpc.NewServer(middleware.UnaryInterceptor(), middleware.StreamInterceptor(), grpc.MaxSendMsgSize(100000000), + // grpc.MaxRecvMsgSize(100000000), grpc.KeepaliveParams(kaParams), // Use the keep-alive parameters + // grpc.KeepaliveEnforcementPolicy(kaPolicy)) + //} else { + // + // grpcServer = grpc.NewServer(middleware.UnaryInterceptor(), middleware.StreamInterceptor(), + // middleware.AltsCredential(server.secClient, server.secInfo), grpc.MaxSendMsgSize(100000000), + // 
grpc.MaxRecvMsgSize(100000000), grpc.KeepaliveParams(kaParams), // Use the keep-alive parameters + // grpc.KeepaliveEnforcementPolicy(kaPolicy)) + //} + + for _, service := range server.services { + log.WithContext(ctx).Debugf("Register services %q", service.Desc().ServiceName) + grpcServer.RegisterService(service.Desc(), service) + } + + return grpcServer +} + +// New returns a new Server instance. +func New(config *Config, name string, + //secClient alts.SecClient, + //secInfo *alts.SecInfo, + services ...service) *Server { + return &Server{ + config: config, + //secClient: secClient, + //secInfo: secInfo, + services: services, + name: name, + } +} diff --git a/supernode/services/cascade/config.go b/supernode/services/cascade/config.go new file mode 100644 index 00000000..78e1578f --- /dev/null +++ b/supernode/services/cascade/config.go @@ -0,0 +1,28 @@ +package cascade + +import ( + "github.com/LumeraProtocol/supernode/supernode/services/common" +) + +const ( + defaultNumberConnectedNodes = 2 + defaultPreburntTxMinConfirmations = 3 +) + +// Config contains settings of the registering Nft. +type Config struct { + common.Config `mapstructure:",squash" json:"-"` + + RaptorQServiceAddress string `mapstructure:"-" json:"-"` + RaptorQServicePort string `mapstructure:"-" json:"-"` + RqFilesDir string + + NumberConnectedNodes int `mapstructure:"-" json:"number_connected_nodes,omitempty"` +} + +// NewConfig returns a new Config instance. 
+func NewConfig() *Config { + return &Config{ + NumberConnectedNodes: defaultNumberConnectedNodes, + } +} diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go new file mode 100644 index 00000000..632c91a7 --- /dev/null +++ b/supernode/services/cascade/service.go @@ -0,0 +1,76 @@ +package cascade + +import ( + "context" + + "github.com/LumeraProtocol/supernode/p2p" + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" + node "github.com/LumeraProtocol/supernode/supernode/node/supernode" + "github.com/LumeraProtocol/supernode/supernode/services/common" + + "google.golang.org/grpc" +) + +type CascadeService struct { + *common.SuperNodeService + config *Config + + lumeraClient lumera.Client + raptorQClient raptorq.ClientInterface + nodeClient node.ClientInterface + + rqstore rqstore.Store + raptorQ raptorq.RaptorQ +} + +func (s *CascadeService) Desc() *grpc.ServiceDesc { + return &grpc.ServiceDesc{ServiceName: "cascade supernode service"} +} + +// NewCascadeRegistrationTask runs a new task of the registration Sense and returns its taskID. +func (s *CascadeService) NewCascadeRegistrationTask() *CascadeRegistrationTask { + task := NewCascadeRegistrationTask(s) + s.Worker.AddTask(task) + + return task +} + +// Run starts task +func (service *CascadeService) Run(ctx context.Context) error { + return service.RunHelper(ctx, service.config.SupernodeAccountAddress, logPrefix) +} + +// Task returns the task of the Sense registration by the given id. +func (s *CascadeService) Task(id string) *CascadeRegistrationTask { + if s.Worker.Task(id) == nil { + return nil + } + + return s.Worker.Task(id).(*CascadeRegistrationTask) +} + +// NewCascadeService returns a new CascadeService instance. 
+func NewCascadeService(config *Config, + lumera lumera.Client, + nodeClient node.ClientInterface, + p2pClient p2p.Client, + rqC raptorq.RaptorQ, + rqClient raptorq.ClientInterface, + rqstore rqstore.Store, +) *CascadeService { + return &CascadeService{ + config: config, + SuperNodeService: common.NewSuperNodeService(p2pClient), + lumeraClient: lumera, + nodeClient: nodeClient, + raptorQ: rqC, + raptorQClient: rqClient, + rqstore: rqstore, + } +} + +func (s *CascadeService) GetSNAddress() string { + return s.config.SupernodeAccountAddress // FIXME : verify +} diff --git a/supernode/services/cascade/task.go b/supernode/services/cascade/task.go new file mode 100644 index 00000000..19e53ec0 --- /dev/null +++ b/supernode/services/cascade/task.go @@ -0,0 +1,64 @@ +package cascade + +import ( + "context" + + "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/pkg/storage/files" + "github.com/LumeraProtocol/supernode/supernode/services/common" +) + +type RQInfo struct { + rqIDsIC uint32 + rqIDs []string + rqIDEncodeParams raptorq.EncoderParameters + + rqIDsFile []byte + rawRqFile []byte + rqIDFiles [][]byte +} + +// CascadeRegistrationTask is the task of registering new Sense. +type CascadeRegistrationTask struct { + RQInfo + *CascadeService + + *common.SuperNodeTask + *common.RegTaskHelper + storage *common.StorageHandler + + Asset *files.File // TODO : remove + assetSizeBytes int + dataHash string + + creatorSignature []byte +} + +const ( + logPrefix = "cascade" +) + +// Run starts the task +func (task *CascadeRegistrationTask) Run(ctx context.Context) error { + return task.RunHelper(ctx, task.removeArtifacts) +} + +func (task *CascadeRegistrationTask) removeArtifacts() { + task.RemoveFile(task.Asset) +} + +// NewCascadeRegistrationTask returns a new Task instance. 
+func NewCascadeRegistrationTask(service *CascadeService) *CascadeRegistrationTask { + + task := &CascadeRegistrationTask{ + SuperNodeTask: common.NewSuperNodeTask(logPrefix), + CascadeService: service, + storage: common.NewStorageHandler(service.P2PClient, service.raptorQClient, + service.config.RaptorQServiceAddress, service.config.RqFilesDir, service.rqstore), + } + + task.RegTaskHelper = common.NewRegTaskHelper(task.SuperNodeTask, service.lumeraClient, common.NewNetworkHandler( + task.SuperNodeTask, service.nodeClient, nil, service.lumeraClient, service.config.NumberConnectedNodes)) + + return task +} diff --git a/supernode/services/cascade/upload.go b/supernode/services/cascade/upload.go new file mode 100644 index 00000000..8497c42f --- /dev/null +++ b/supernode/services/cascade/upload.go @@ -0,0 +1,170 @@ +package cascade + +import ( + "context" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + "github.com/LumeraProtocol/supernode/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/supernode/services/common" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type UploadInputDataRequest struct { + ActionID string + Filename string + DataHash string + RqMax int32 + SignedData string + Data []byte +} + +type UploadInputDataResponse struct { + Success bool + Message string +} + +func (task *CascadeRegistrationTask) UploadInputData(ctx context.Context, req *UploadInputDataRequest) (*UploadInputDataResponse, error) { + fields := logtrace.Fields{ + logtrace.FieldMethod: "UploadInputData", + logtrace.FieldRequest: req, + } + + actionRes, err := task.lumeraClient.Action().GetAction(ctx, req.ActionID) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to get action", fields) + return nil, status.Errorf(codes.Internal, "failed to get 
action") + } + if actionRes.GetAction().ActionID == "" { + logtrace.Error(ctx, "action not found", fields) + return nil, status.Errorf(codes.Internal, "action not found") + } + actionDetails := actionRes.GetAction() + logtrace.Info(ctx, "action has been retrieved", fields) + + latestBlock, err := task.lumeraClient.Node().GetLatestBlock(ctx) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to get latest block", fields) + return nil, status.Errorf(codes.Internal, "failed to get latest block") + } + latestBlockHeight := uint64(latestBlock.GetSdkBlock().GetHeader().Height) + latestBlockHash := latestBlock.GetBlockId().GetHash() + fields[logtrace.FieldBlockHeight] = latestBlockHeight + logtrace.Info(ctx, "latest block has been retrieved", fields) + + topSNsRes, err := task.lumeraClient.SuperNode().GetTopSuperNodesForBlock(ctx, latestBlockHeight) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to get top SNs", fields) + return nil, status.Errorf(codes.Internal, "failed to get top SNs") + } + logtrace.Info(ctx, "top sns have been fetched", fields) + + if !supernode.Exists(topSNsRes.Supernodes, task.config.SupernodeAccountAddress) { + logtrace.Error(ctx, "current supernode do not exist in the top sns list", fields) + return nil, status.Errorf(codes.Internal, "current supernode does not exist in the top sns list") + } + logtrace.Info(ctx, "current supernode exists in the top sns list", fields) + + if req.DataHash != actionDetails.Metadata.GetCascadeMetadata().DataHash { + logtrace.Error(ctx, "data hash doesn't match", fields) + return nil, status.Errorf(codes.Internal, "data hash doesn't match") + } + logtrace.Info(ctx, "request data-hash has been matched with the action data-hash", fields) + + res, err := task.raptorQ.GenRQIdentifiersFiles(ctx, raptorq.GenRQIdentifiersFilesRequest{ + TaskID: task.ID(), + BlockHash: string(latestBlockHash), + Data: req.Data, + CreatorSNAddress: 
actionDetails.GetCreator(), + RqMax: uint32(actionDetails.Metadata.GetCascadeMetadata().RqMax), + SignedData: req.SignedData, + LC: task.lumeraClient, + }) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to generate RQID Files", fields) + return nil, status.Errorf(codes.Internal, "failed to generate RQID Files") + } + logtrace.Info(ctx, "rq symbols, rq-ids and rqid-files have been generated", fields) + + task.RQInfo.rqIDsIC = res.RQIDsIc + task.RQInfo.rqIDs = res.RQIDs + task.RQInfo.rqIDFiles = res.RQIDsFiles + task.RQInfo.rqIDsFile = res.RQIDsFile + task.RQInfo.rqIDEncodeParams = res.RQEncodeParams + task.creatorSignature = res.CreatorSignature + + // TODO : MsgFinalizeAction + + if err = task.storeIDFiles(ctx); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "error storing id files to p2p", fields) + return nil, status.Errorf(codes.Internal, "error storing id files to p2p") + } + logtrace.Info(ctx, "id files have been stored", fields) + + if err = task.storeRaptorQSymbols(ctx); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "error storing raptor-q symbols", fields) + return nil, status.Errorf(codes.Internal, "error storing raptor-q symbols") + } + logtrace.Info(ctx, "raptor-q symbols have been stored", fields) + + return &UploadInputDataResponse{ + Success: true, + Message: "successfully uploaded input data", + }, nil +} + +func (task *CascadeRegistrationTask) storeIDFiles(ctx context.Context) error { + ctx = context.WithValue(ctx, log.TaskIDKey, task.ID()) + task.storage.TaskID = task.ID() + if err := task.storage.StoreBatch(ctx, task.RQInfo.rqIDFiles, common.P2PDataCascadeMetadata); err != nil { + return errors.Errorf("store ID files into kademlia: %w", err) + } + return nil +} + +func (task *CascadeRegistrationTask) storeRaptorQSymbols(ctx context.Context) error { + return task.storage.StoreRaptorQSymbolsIntoP2P(ctx, task.ID()) +} + +//// validates RQIDs 
file +//func (task *CascadeRegistrationTask) validateRqIDs(ctx context.Context, dd []byte, ticket *ct.CascadeTicket) error { +// snAccAddresses := []string{ticket.Creator} +// +// var err error +// task.rawRqFile, task.rqIDFiles, err = task.ValidateIDFiles(ctx, dd, +// ticket.RQIDsIC, uint32(ticket.RQIDsMax), +// ticket.RQIDs, 1, +// snAccAddresses, +// task.lumeraClient, +// ticket.CreatorSignature, +// ) +// if err != nil { +// return errors.Errorf("validate rq_ids file: %w", err) +// } +// +// return nil +//} +// +//// validates actual RQ Symbol IDs inside RQIDs file +//func (task *CascadeRegistrationTask) validateRQSymbolID(ctx context.Context, ticket *ct.CascadeTicket) error { +// +// content, err := task.Asset.Bytes() +// if err != nil { +// return errors.Errorf("read image contents: %w", err) +// } +// +// return task.storage.ValidateRaptorQSymbolIDs(ctx, +// content /*uint32(len(task.Ticket.AppTicketData.RQIDs))*/, 1, +// hex.EncodeToString([]byte(ticket.BlockHash)), ticket.Creator, +// task.rawRqFile) +//} diff --git a/supernode/services/common/config.go b/supernode/services/common/config.go new file mode 100644 index 00000000..684d1fd1 --- /dev/null +++ b/supernode/services/common/config.go @@ -0,0 +1,19 @@ +package common + +const ( + defaultNumberSuperNodes = 10 +) + +// Config contains common configuration of the services. 
+type Config struct { + SupernodeAccountAddress string + SupernodeIPAddress string + NumberSuperNodes int +} + +// NewConfig returns a new Config instance +func NewConfig() *Config { + return &Config{ + NumberSuperNodes: defaultNumberSuperNodes, + } +} diff --git a/supernode/services/common/network_handler.go b/supernode/services/common/network_handler.go new file mode 100644 index 00000000..2ff7d1c6 --- /dev/null +++ b/supernode/services/common/network_handler.go @@ -0,0 +1,256 @@ +package common + +import ( + "context" + "fmt" + "sync" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/lumera" + supernode "github.com/LumeraProtocol/supernode/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/pkg/types" + node "github.com/LumeraProtocol/supernode/supernode/node/supernode" +) + +// NetworkHandler common functionality related for SNs Mesh and other interconnections +type NetworkHandler struct { + task *SuperNodeTask + lumeraHandler lumera.Client + + nodeMaker node.NodeMaker + NodeClient node.ClientInterface + + acceptedMu sync.Mutex + Accepted SuperNodePeerList + + meshedNodes []types.MeshedSuperNode + // valid only for secondary node + ConnectedTo *SuperNodePeer + + superNodeAccAddress string + minNumberConnectedNodes int +} + +// NewNetworkHandler creates instance of NetworkHandler +func NewNetworkHandler(task *SuperNodeTask, + nodeClient node.ClientInterface, + nodeMaker node.NodeMaker, + lc lumera.Client, + minNumberConnectedNodes int, +) *NetworkHandler { + return &NetworkHandler{ + task: task, + nodeMaker: nodeMaker, + lumeraHandler: lc, + NodeClient: nodeClient, + minNumberConnectedNodes: minNumberConnectedNodes, + } +} + +// MeshedNodes return SupernodeAccountAddresses of meshed nodes +func (h *NetworkHandler) MeshedNodes() []string { + var ids []string + for _, peer := range h.meshedNodes { + ids = append(ids, peer.NodeID) + } + return ids +} + +// 
Session is handshake wallet to supernode +func (h *NetworkHandler) Session(ctx context.Context, isPrimary bool) error { + if err := h.task.RequiredStatus(StatusTaskStarted); err != nil { + return err + } + + <-h.task.NewAction(func(ctx context.Context) error { + if isPrimary { + log.WithContext(ctx).Debug("Acts as primary node") + h.task.UpdateStatus(StatusPrimaryMode) + return nil + } + + log.WithContext(ctx).Debug("Acts as secondary node") + h.task.UpdateStatus(StatusSecondaryMode) + + return nil + }) + return nil +} + +// AcceptedNodes waits for connection supernodes, as soon as there is the required amount returns them. +func (h *NetworkHandler) AcceptedNodes(serverCtx context.Context) (SuperNodePeerList, error) { + if err := h.task.RequiredStatus(StatusPrimaryMode); err != nil { + return nil, fmt.Errorf("AcceptedNodes: %w", err) + } + + <-h.task.NewAction(func(ctx context.Context) error { + log.WithContext(ctx).Debug("Waiting for supernodes to connect") + + sub := h.task.SubscribeStatus() + for { + select { + case <-serverCtx.Done(): + return nil + case <-ctx.Done(): + return nil + case status := <-sub(): + if status.Is(StatusConnected) { + return nil + } + } + } + }) + return h.Accepted, nil +} + +// SessionNode accepts secondary node +func (h *NetworkHandler) SessionNode(_ context.Context, nodeID string) error { + h.acceptedMu.Lock() + defer h.acceptedMu.Unlock() + + if err := h.task.RequiredStatus(StatusPrimaryMode); err != nil { + return fmt.Errorf("SessionNode: %w", err) + } + + var err error + + <-h.task.NewAction(func(ctx context.Context) error { + if node := h.Accepted.ByID(nodeID); node != nil { + log.WithContext(ctx).WithField("nodeID", nodeID).Errorf("node is already registered") + err = errors.Errorf("node %q is already registered", nodeID) + return nil + } + + var someNode *SuperNodePeer + someNode, err = h.toSupernodePeer(ctx, nodeID) + if err != nil { + log.WithContext(ctx).WithField("nodeID", nodeID).WithError(err).Errorf("get node by extID") + 
err = errors.Errorf("get node by extID %s: %w", nodeID, err) + return nil + } + h.Accepted.Add(someNode) + + log.WithContext(ctx).WithField("nodeID", nodeID).Debug("Accept secondary node") + + if len(h.Accepted) >= h.minNumberConnectedNodes { + h.task.UpdateStatus(StatusConnected) + } + return nil + }) + return err +} + +// ConnectTo connects to primary node +func (h *NetworkHandler) ConnectTo(_ context.Context, nodeID, sessID string) error { + if err := h.task.RequiredStatus(StatusSecondaryMode); err != nil { + return err + } + + var err error + + <-h.task.NewAction(func(ctx context.Context) error { + var someNode *SuperNodePeer + someNode, err = h.toSupernodePeer(ctx, nodeID) + if err != nil { + log.WithContext(ctx).WithField("nodeID", nodeID).WithError(err).Errorf("get node by extID") + return nil + } + + if err := someNode.Connect(ctx); err != nil { + log.WithContext(ctx).WithField("nodeID", nodeID).WithError(err).Errorf("connect to node") + return nil + } + + if err = someNode.Session(ctx, h.superNodeAccAddress, sessID); err != nil { + log.WithContext(ctx).WithField("sessID", sessID).WithField("sn-acc-address", h.superNodeAccAddress).WithError(err).Errorf("handshake with peer") + return nil + } + + h.ConnectedTo = someNode + h.task.UpdateStatus(StatusConnected) + return nil + }) + return err +} + +// MeshNodes to set info of all meshed supernodes - that will be to send +func (h *NetworkHandler) MeshNodes(_ context.Context, meshedNodes []types.MeshedSuperNode) error { + if err := h.task.RequiredStatus(StatusConnected); err != nil { + return err + } + h.meshedNodes = meshedNodes + + return nil +} + +// CheckNodeInMeshedNodes checks if the node is in the active mesh (by nodeID) +func (h *NetworkHandler) CheckNodeInMeshedNodes(nodeID string) error { + if h.meshedNodes == nil { + return errors.New("nil meshedNodes") + } + + for _, node := range h.meshedNodes { + if node.NodeID == nodeID { + return nil + } + } + + return errors.New("nodeID not found") +} + +// 
toSupernodePeer returns information about SN by its account-address +func (h *NetworkHandler) toSupernodePeer(ctx context.Context, supernodeAccountAddress string) (*SuperNodePeer, error) { + sn, err := h.lumeraHandler.SuperNode().GetSupernodeBySupernodeAddress(ctx, supernodeAccountAddress) + if err != nil { + return nil, err + } + + supernodeIP, err := supernode.GetLatestIP(sn) + if err != nil { + return nil, err + } + + someNode := NewSuperNode(h.NodeClient, supernodeIP, supernodeAccountAddress, h.nodeMaker) + return someNode, nil +} + +// Connect connects to grpc Server and setup pointer to concrete client wrapper +func (node *SuperNodePeer) Connect(ctx context.Context) error { + connCtx, connCancel := context.WithTimeout(ctx, defaultConnectToNodeTimeout) + defer connCancel() + + conn, err := node.ClientInterface.Connect(connCtx, node.Address) + if err != nil { + return err + } + + node.ConnectionInterface = conn + node.SuperNodePeerAPIInterface = node.MakeNode(conn) + return nil +} + +func (h *NetworkHandler) CloseSNsConnections(ctx context.Context) error { + for _, node := range h.Accepted { + if node.ConnectionInterface != nil { + if err := node.Close(); err != nil { + log.WithContext(ctx).WithError(err).Errorf("close connection to node %s", node.ID) + } + } else { + log.WithContext(ctx).Errorf("node %s has no connection", node.ID) + } + + } + + if h.ConnectedTo != nil { + if err := h.ConnectedTo.Close(); err != nil { + log.WithContext(ctx).WithError(err).Errorf("close connection to node %s", h.ConnectedTo.ID) + } + } + + return nil +} + +func (h *NetworkHandler) IsPrimary() bool { + return h.ConnectedTo == nil +} diff --git a/supernode/services/common/node_peer.go b/supernode/services/common/node_peer.go new file mode 100644 index 00000000..07ae4f7d --- /dev/null +++ b/supernode/services/common/node_peer.go @@ -0,0 +1,82 @@ +package common + +import ( + "time" + + node "github.com/LumeraProtocol/supernode/supernode/node/supernode" +) + +const ( + 
defaultConnectToNodeTimeout = time.Second * 35 +) + +// SuperNodePeer represents a single supernode +type SuperNodePeer struct { + node.ClientInterface + node.NodeMaker + node.ConnectionInterface + node.SuperNodePeerAPIInterface + + ID string + Address string +} + +//// Connect connects to grpc Server and setup pointer to concrete client wrapper +//func (node *SuperNodePeer) Connect(ctx context.Context) error { +// connCtx, connCancel := context.WithTimeout(ctx, defaultConnectToNodeTimeout) +// defer connCancel() +// +// conn, err := node.ClientInterface.Connect(connCtx, node.Address) +// if err != nil { +// return err +// } +// +// node.ConnectionInterface = conn +// node.SuperNodePeerAPIInterface = node.MakeNode(conn) +// return nil +//} + +// NewSuperNode returns a new Node instance. +func NewSuperNode( + client node.ClientInterface, + address string, nodeAddress string, + nodeMaker node.NodeMaker) *SuperNodePeer { + return &SuperNodePeer{ + ClientInterface: client, + NodeMaker: nodeMaker, + Address: address, + ID: nodeAddress, + } +} + +// SuperNodePeerList represents muptiple SenseRegistrationNodes +type SuperNodePeerList []*SuperNodePeer + +// Add adds a new node to the list +func (list *SuperNodePeerList) Add(node *SuperNodePeer) { + *list = append(*list, node) +} + +// ByID returns a node from the list by the given id. +func (list *SuperNodePeerList) ByID(id string) *SuperNodePeer { + for _, someNode := range *list { + if someNode.ID == id { + return someNode + } + } + return nil +} + +// Remove removes a node from the list by the given id. +func (list *SuperNodePeerList) Remove(id string) { + for i, someNode := range *list { + if someNode.ID == id { + if i+1 < len(*list) { + *list = append((*list)[:i], (*list)[i+1:]...) 
+ } else { + *list = (*list)[:i] + } + break + } + } +} diff --git a/supernode/services/common/p2p.go b/supernode/services/common/p2p.go new file mode 100644 index 00000000..a477a591 --- /dev/null +++ b/supernode/services/common/p2p.go @@ -0,0 +1,21 @@ +package common + +const ( + // UnknownDataType ... + UnknownDataType = iota // 1 + + // P2PDataRaptorQSymbol rq symbol + P2PDataRaptorQSymbol // 1 + // P2PDataCascadeMetadata cascade ID file + P2PDataCascadeMetadata // 2 + // P2PDataDDMetadata dd fp metadata file + P2PDataDDMetadata // 3 + // P2PPreviewThumbnail preview NFT thumbnail + P2PPreviewThumbnail // 4 + // P2PMediumThumbnail NFT medium thumbnail + P2PMediumThumbnail // 5 + // P2PSmallThumbnail small NFT thumbnail + P2PSmallThumbnail // 6 + // P2PDebug debug + P2PDebug // 7 +) diff --git a/supernode/services/common/reg_task_helper.go b/supernode/services/common/reg_task_helper.go new file mode 100644 index 00000000..718b42ba --- /dev/null +++ b/supernode/services/common/reg_task_helper.go @@ -0,0 +1,140 @@ +package common + +import ( + "bytes" + "context" + "sync" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +const ( + SeparatorByte = 46 +) + +// RegTaskHelper common operations related to (any) Ticket registration +type RegTaskHelper struct { + *SuperNodeTask + + NetworkHandler *NetworkHandler + LumeraHandler *lumera.Client + + peersTicketSignatureMtx *sync.Mutex + PeersTicketSignature map[string][]byte + AllSignaturesReceivedChn chan struct{} +} + +// NewRegTaskHelper creates instance of RegTaskHelper +func NewRegTaskHelper(task *SuperNodeTask, + lumeraClient lumera.Client, + NetworkHandler *NetworkHandler, +) *RegTaskHelper { + return &RegTaskHelper{ + SuperNodeTask: task, + LumeraHandler: &lumeraClient, + NetworkHandler: NetworkHandler, + 
peersTicketSignatureMtx: &sync.Mutex{}, + PeersTicketSignature: make(map[string][]byte), + AllSignaturesReceivedChn: make(chan struct{}), + } +} + +// AddPeerTicketSignature waits for ticket signatures from other SNs and adds them into internal array +func (h *RegTaskHelper) AddPeerTicketSignature(nodeID string, signature []byte, reqStatus Status) error { + h.peersTicketSignatureMtx.Lock() + defer h.peersTicketSignatureMtx.Unlock() + + if err := h.RequiredStatus(reqStatus); err != nil { + return err + } + + var err error + + <-h.NewAction(func(ctx context.Context) error { + log.WithContext(ctx).Debugf("receive NFT ticket signature from node %s", nodeID) + if node := h.NetworkHandler.Accepted.ByID(nodeID); node == nil { + log.WithContext(ctx).WithField("node", nodeID).Errorf("node is not in Accepted list") + err = errors.Errorf("node %s not in Accepted list", nodeID) + return nil + } + + h.PeersTicketSignature[nodeID] = signature + if len(h.PeersTicketSignature) == len(h.NetworkHandler.Accepted) { + log.WithContext(ctx).Debug("all signature received") + go func() { + close(h.AllSignaturesReceivedChn) + }() + } + return nil + }) + return err +} + +// ValidateIDFiles validates received (IDs) file and its (50) IDs: +// 1. checks signatures +// 2. 
generates list of 50 IDs and compares them to received +func (h *RegTaskHelper) ValidateIDFiles(ctx context.Context, + data []byte, ic uint32, max uint32, ids []string, numSignRequired int, + snAccAddresses []string, + lumeraClient lumera.Client, + creatorSignaure []byte, +) ([]byte, [][]byte, error) { + + dec, err := utils.B64Decode(data) + if err != nil { + return nil, nil, errors.Errorf("decode data: %w", err) + } + + decData, err := utils.Decompress(dec) + if err != nil { + return nil, nil, errors.Errorf("decompress: %w", err) + } + + splits := bytes.Split(decData, []byte{SeparatorByte}) + if len(splits) != numSignRequired+1 { + return nil, nil, errors.New("invalid data") + } + + file, err := utils.B64Decode(splits[0]) + if err != nil { + return nil, nil, errors.Errorf("decode file: %w", err) + } + + verifications := 0 + verifiedNodes := make(map[int]bool) + for i := 1; i < numSignRequired+1; i++ { + for j := 0; j < len(snAccAddresses); j++ { + if _, ok := verifiedNodes[j]; ok { + continue + } + + err := lumeraClient.Node().Verify(snAccAddresses[j], file, creatorSignaure) // TODO : verify the signature + if err != nil { + return nil, nil, errors.Errorf("verify file signature %w", err) + } + + verifiedNodes[j] = true + verifications++ + break + } + } + + if verifications != numSignRequired { + return nil, nil, errors.Errorf("file verification failed: need %d verifications, got %d", numSignRequired, verifications) + } + + gotIDs, idFiles, err := raptorq.GetIDFiles(ctx, decData, ic, max) + if err != nil { + return nil, nil, errors.Errorf("get ids: %w", err) + } + + if err := utils.EqualStrList(gotIDs, ids); err != nil { + return nil, nil, errors.Errorf("IDs don't match: %w", err) + } + + return file, idFiles, nil +} diff --git a/supernode/services/common/service.go b/supernode/services/common/service.go new file mode 100644 index 00000000..5366cb96 --- /dev/null +++ b/supernode/services/common/service.go @@ -0,0 +1,72 @@ +package common + +import ( + "context" + 
"time" + + "github.com/LumeraProtocol/supernode/p2p" + "github.com/LumeraProtocol/supernode/pkg/common/task" + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" +) + +// SuperNodeServiceInterface common interface for Services +type SuperNodeServiceInterface interface { + RunHelper(ctx context.Context) error + NewTask() task.Task + Task(id string) task.Task +} + +// SuperNodeService common "class" for Services +type SuperNodeService struct { + *task.Worker + // *files.Storage + + P2PClient p2p.Client +} + +// run starts task +func (service *SuperNodeService) run(ctx context.Context, nodeID string, prefix string) error { + ctx = log.ContextWithPrefix(ctx, prefix) + + if nodeID == "" { + return errors.New("PastelID is not specified in the config file") + } + + group, ctx := errgroup.WithContext(ctx) + group.Go(func() error { + return service.Worker.Run(ctx) + }) + + return group.Wait() +} + +// RunHelper common code for Service runner +func (service *SuperNodeService) RunHelper(ctx context.Context, nodeID string, prefix string) error { + for { + select { + case <-ctx.Done(): + log.WithContext(ctx).Error("context done - closing sn services") + return nil + case <-time.After(5 * time.Second): + if err := service.run(ctx, nodeID, prefix); err != nil { + service.Worker = task.NewWorker() + log.WithContext(ctx).WithError(err).Error("Service run failed, retrying") + } else { + log.WithContext(ctx).Info("Service run completed successfully - closing sn services") + return nil + } + } + } +} + +// NewSuperNodeService creates SuperNodeService +func NewSuperNodeService( + p2pClient p2p.Client, +) *SuperNodeService { + return &SuperNodeService{ + Worker: task.NewWorker(), + P2PClient: p2pClient, + } +} diff --git a/supernode/services/common/status.go b/supernode/services/common/status.go new file mode 100644 index 00000000..53af3527 --- /dev/null +++ 
b/supernode/services/common/status.go @@ -0,0 +1,124 @@ +package common + +// List of task statuses. +const ( + StatusTaskStarted Status = iota + + // Mode + StatusPrimaryMode + StatusSecondaryMode + + // Process + StatusConnected + + StatusImageProbed + StatusAssetUploaded + StatusImageAndThumbnailCoordinateUploaded + StatusRegistrationFeeCalculated + StatusFileDecoded + + // Error + StatusErrorInvalidBurnTxID + StatusRequestTooLate + StatusNftRegGettingFailed + StatusNftRegDecodingFailed + StatusNftRegTicketInvalid + StatusListTradeTicketsFailed + StatusTradeTicketsNotFound + StatusTradeTicketMismatched + StatusTimestampVerificationFailed + StatusTimestampInvalid + StatusRQServiceConnectionFailed + StatusSymbolFileNotFound + StatusSymbolFileInvalid + StatusSymbolNotFound + StatusSymbolMismatched + StatusSymbolsNotEnough + StatusFileDecodingFailed + StatusFileReadingFailed + StatusFileMismatched + StatusFileEmpty + StatusKeyNotFound + StatusFileRestoreFailed + StatusFileExists + + // Final + StatusTaskCanceled + StatusTaskCompleted +) + +var statusNames = map[Status]string{ + StatusTaskStarted: "Task started", + StatusPrimaryMode: "Primary Mode", + StatusSecondaryMode: "Secondary Mode", + StatusConnected: "Connected", + StatusImageProbed: "Image Probed", + StatusAssetUploaded: "Asset Uploaded", + StatusImageAndThumbnailCoordinateUploaded: "Imaged And Thumbnail Coordinate Uploaded", + StatusRegistrationFeeCalculated: "Registration Fee Caculated", + StatusFileDecoded: "File Decoded", + StatusErrorInvalidBurnTxID: "Error Invalid Burn TxID", + StatusRequestTooLate: "Request too late", + StatusNftRegGettingFailed: "NFT registered getting failed", + StatusNftRegDecodingFailed: "NFT registered decoding failed", + StatusNftRegTicketInvalid: "NFT registered ticket invalid", + StatusListTradeTicketsFailed: "Could not get available trade tickets", + StatusTradeTicketsNotFound: "Trade tickets not found", + StatusTradeTicketMismatched: "Trade ticket mismatched", + 
StatusTimestampVerificationFailed: "Could not verify timestamp", + StatusTimestampInvalid: "Timestamp invalid", + StatusRQServiceConnectionFailed: "RQ Service connection failed", + StatusSymbolFileNotFound: "Symbol file not found", + StatusSymbolFileInvalid: "Symbol file invalid", + StatusSymbolNotFound: "Symbol not found", + StatusSymbolMismatched: "Symbol mismatched", + StatusSymbolsNotEnough: "Symbols not enough", + StatusFileDecodingFailed: "File decoding failed", + StatusFileReadingFailed: "File reading failed", + StatusFileEmpty: "File empty", + StatusFileMismatched: "File mismatched", + StatusKeyNotFound: "Key not found", + StatusFileExists: "File hash exists", + StatusFileRestoreFailed: "File restore failed", + StatusTaskCanceled: "Task Canceled", + StatusTaskCompleted: "Task Completed", +} + +// Status represents status of the task +type Status byte + +func (status Status) String() string { + if name, ok := statusNames[status]; ok { + return name + } + return "" +} + +// IsFinal returns true if the status is the final. 
+func (status Status) IsFinal() bool { + return status == StatusTaskCanceled || status == StatusTaskCompleted +} + +// IsFailure returns true if the task failed due to an error +func (status Status) IsFailure() bool { + return status == StatusTaskCanceled || status == StatusRequestTooLate || + status == StatusNftRegGettingFailed || status == StatusNftRegDecodingFailed || + status == StatusNftRegTicketInvalid || status == StatusListTradeTicketsFailed || + status == StatusTradeTicketsNotFound || status == StatusTradeTicketMismatched || + status == StatusTimestampVerificationFailed || status == StatusTimestampInvalid || + status == StatusRQServiceConnectionFailed || status == StatusSymbolFileNotFound || + status == StatusSymbolFileInvalid || status == StatusSymbolNotFound || + status == StatusSymbolMismatched || status == StatusSymbolsNotEnough || + status == StatusFileDecodingFailed || status == StatusFileReadingFailed || + status == StatusFileEmpty || status == StatusFileMismatched || + status == StatusKeyNotFound || status == StatusFileRestoreFailed || status == StatusFileExists +} + +// StatusNames returns a sorted list of status names. 
+func StatusNames() []string { + list := make([]string, len(statusNames)) + for i, name := range statusNames { + list[i] = name + } + return list +} diff --git a/supernode/services/common/status_test.go b/supernode/services/common/status_test.go new file mode 100644 index 00000000..3f6de1be --- /dev/null +++ b/supernode/services/common/status_test.go @@ -0,0 +1,350 @@ +package common + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusNames(t *testing.T) { + t.Parallel() + + testCases := []struct { + expectedStatues []Status + }{ + { + expectedStatues: []Status{ + StatusTaskStarted, + StatusPrimaryMode, + StatusSecondaryMode, + + // Process + StatusConnected, + StatusImageProbed, + StatusAssetUploaded, + StatusImageAndThumbnailCoordinateUploaded, + StatusRegistrationFeeCalculated, + StatusFileDecoded, + + // Error + StatusErrorInvalidBurnTxID, + StatusRequestTooLate, + StatusNftRegGettingFailed, + StatusNftRegDecodingFailed, + StatusNftRegTicketInvalid, + StatusListTradeTicketsFailed, + StatusTradeTicketsNotFound, + StatusTradeTicketMismatched, + StatusTimestampVerificationFailed, + StatusTimestampInvalid, + StatusRQServiceConnectionFailed, + StatusSymbolFileNotFound, + StatusSymbolFileInvalid, + StatusSymbolNotFound, + StatusSymbolMismatched, + StatusSymbolsNotEnough, + StatusFileDecodingFailed, + StatusFileReadingFailed, + StatusFileMismatched, + StatusFileEmpty, + StatusKeyNotFound, + StatusFileRestoreFailed, + StatusFileExists, + + // Final + StatusTaskCanceled, + StatusTaskCompleted, + }, + }, + } + + for i, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("testCase:%d", i), func(t *testing.T) { + t.Parallel() + + var expectedNames []string + for _, status := range testCase.expectedStatues { + expectedNames = append(expectedNames, StatusNames()[status]) + } + + assert.Equal(t, expectedNames, StatusNames()) + }) + + } +} + +func TestStatusString(t *testing.T) { + t.Parallel() + + testCases := 
[]struct { + status Status + expectedValue string + }{ + { + status: StatusTaskStarted, + expectedValue: StatusNames()[StatusTaskStarted], + }, { + status: StatusFileDecoded, + expectedValue: StatusNames()[StatusFileDecoded], + }, { + status: StatusRequestTooLate, + expectedValue: StatusNames()[StatusRequestTooLate], + }, { + status: StatusNftRegGettingFailed, + expectedValue: StatusNames()[StatusNftRegGettingFailed], + }, { + status: StatusNftRegDecodingFailed, + expectedValue: StatusNames()[StatusNftRegDecodingFailed], + }, { + status: StatusNftRegTicketInvalid, + expectedValue: StatusNames()[StatusNftRegTicketInvalid], + }, { + status: StatusListTradeTicketsFailed, + expectedValue: StatusNames()[StatusListTradeTicketsFailed], + }, { + status: StatusTradeTicketsNotFound, + expectedValue: StatusNames()[StatusTradeTicketsNotFound], + }, { + status: StatusTradeTicketMismatched, + expectedValue: StatusNames()[StatusTradeTicketMismatched], + }, { + status: StatusTimestampVerificationFailed, + expectedValue: StatusNames()[StatusTimestampVerificationFailed], + }, { + status: StatusTimestampInvalid, + expectedValue: StatusNames()[StatusTimestampInvalid], + }, { + status: StatusRQServiceConnectionFailed, + expectedValue: StatusNames()[StatusRQServiceConnectionFailed], + }, { + status: StatusSymbolFileNotFound, + expectedValue: StatusNames()[StatusSymbolFileNotFound], + }, { + status: StatusSymbolFileInvalid, + expectedValue: StatusNames()[StatusSymbolFileInvalid], + }, { + status: StatusSymbolNotFound, + expectedValue: StatusNames()[StatusSymbolNotFound], + }, { + status: StatusSymbolMismatched, + expectedValue: StatusNames()[StatusSymbolMismatched], + }, { + status: StatusSymbolsNotEnough, + expectedValue: StatusNames()[StatusSymbolsNotEnough], + }, { + status: StatusFileDecodingFailed, + expectedValue: StatusNames()[StatusFileDecodingFailed], + }, { + status: StatusFileReadingFailed, + expectedValue: StatusNames()[StatusFileReadingFailed], + }, { + status: 
StatusFileMismatched, + expectedValue: StatusNames()[StatusFileMismatched], + }, { + status: StatusFileEmpty, + expectedValue: StatusNames()[StatusFileEmpty], + }, { + status: StatusTaskCanceled, + expectedValue: StatusNames()[StatusTaskCanceled], + }, { + status: StatusTaskCompleted, + expectedValue: StatusNames()[StatusTaskCompleted], + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("status:%v/value:%s", testCase.status, testCase.expectedValue), func(t *testing.T) { + t.Parallel() + + value := testCase.status.String() + assert.Equal(t, testCase.expectedValue, value) + }) + } +} + +func TestStatusIsFinal(t *testing.T) { + t.Parallel() + + testCases := []struct { + status Status + expectedValue bool + }{ + { + status: StatusTaskStarted, + expectedValue: false, + }, { + status: StatusFileDecoded, + expectedValue: false, + }, { + status: StatusRequestTooLate, + expectedValue: false, + }, { + status: StatusNftRegGettingFailed, + expectedValue: false, + }, { + status: StatusNftRegDecodingFailed, + expectedValue: false, + }, { + status: StatusNftRegTicketInvalid, + expectedValue: false, + }, { + status: StatusListTradeTicketsFailed, + expectedValue: false, + }, { + status: StatusTradeTicketsNotFound, + expectedValue: false, + }, { + status: StatusTradeTicketMismatched, + expectedValue: false, + }, { + status: StatusTimestampVerificationFailed, + expectedValue: false, + }, { + status: StatusTimestampInvalid, + expectedValue: false, + }, { + status: StatusRQServiceConnectionFailed, + expectedValue: false, + }, { + status: StatusSymbolFileNotFound, + expectedValue: false, + }, { + status: StatusSymbolFileInvalid, + expectedValue: false, + }, { + status: StatusSymbolNotFound, + expectedValue: false, + }, { + status: StatusSymbolMismatched, + expectedValue: false, + }, { + status: StatusSymbolsNotEnough, + expectedValue: false, + }, { + status: StatusFileDecodingFailed, + expectedValue: false, + }, { + status: 
StatusFileReadingFailed, + expectedValue: false, + }, { + status: StatusFileMismatched, + expectedValue: false, + }, { + status: StatusFileEmpty, + expectedValue: false, + }, { + status: StatusTaskCanceled, + expectedValue: true, + }, { + status: StatusTaskCompleted, + expectedValue: true, + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("status:%v/value:%v", testCase.status, testCase.expectedValue), func(t *testing.T) { + t.Parallel() + + value := testCase.status.IsFinal() + assert.Equal(t, testCase.expectedValue, value) + }) + } +} + +func TestStatusIsFailure(t *testing.T) { + t.Parallel() + + testCases := []struct { + status Status + expectedValue bool + }{ + { + status: StatusTaskStarted, + expectedValue: false, + }, { + status: StatusFileDecoded, + expectedValue: false, + }, { + status: StatusRequestTooLate, + expectedValue: true, + }, { + status: StatusNftRegGettingFailed, + expectedValue: true, + }, { + status: StatusNftRegDecodingFailed, + expectedValue: true, + }, { + status: StatusNftRegTicketInvalid, + expectedValue: true, + }, { + status: StatusListTradeTicketsFailed, + expectedValue: true, + }, { + status: StatusTradeTicketsNotFound, + expectedValue: true, + }, { + status: StatusTradeTicketMismatched, + expectedValue: true, + }, { + status: StatusTimestampVerificationFailed, + expectedValue: true, + }, { + status: StatusTimestampInvalid, + expectedValue: true, + }, { + status: StatusRQServiceConnectionFailed, + expectedValue: true, + }, { + status: StatusSymbolFileNotFound, + expectedValue: true, + }, { + status: StatusSymbolFileInvalid, + expectedValue: true, + }, { + status: StatusSymbolNotFound, + expectedValue: true, + }, { + status: StatusSymbolMismatched, + expectedValue: true, + }, { + status: StatusSymbolsNotEnough, + expectedValue: true, + }, { + status: StatusFileDecodingFailed, + expectedValue: true, + }, { + status: StatusFileReadingFailed, + expectedValue: true, + }, { + status: 
StatusFileMismatched, + expectedValue: true, + }, { + status: StatusFileEmpty, + expectedValue: true, + }, { + status: StatusTaskCanceled, + expectedValue: true, + }, { + status: StatusTaskCompleted, + expectedValue: false, + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("status:%v/value:%v", testCase.status, testCase.expectedValue), func(t *testing.T) { + t.Parallel() + + value := testCase.status.IsFailure() + assert.Equal(t, testCase.expectedValue, value) + }) + } +} diff --git a/supernode/services/common/storage_handler.go b/supernode/services/common/storage_handler.go new file mode 100644 index 00000000..49fb7189 --- /dev/null +++ b/supernode/services/common/storage_handler.go @@ -0,0 +1,374 @@ +package common + +import ( + "context" + "fmt" + "math" + "sort" + "time" + + "github.com/LumeraProtocol/supernode/p2p" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + rqnode "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/pkg/storage/files" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +const ( + loadSymbolsBatchSize = 2500 + storeSymbolsPercent = 10 + concurrency = 1 +) + +// StorageHandler provides common logic for RQ and P2P operations +type StorageHandler struct { + P2PClient p2p.Client + RqClient rqnode.ClientInterface + + rqAddress string + rqDir string + + TaskID string + TxID string + + store rqstore.Store + semaphore chan struct{} +} + +// NewStorageHandler creates instance of StorageHandler +func NewStorageHandler(p2p p2p.Client, rq rqnode.ClientInterface, + rqAddress string, rqDir string, store rqstore.Store) *StorageHandler { + + return &StorageHandler{ + P2PClient: p2p, + RqClient: rq, + rqAddress: rqAddress, + rqDir: rqDir, + store: store, + semaphore: make(chan struct{}, concurrency), + } +} + +// StoreFileIntoP2P stores file into P2P +func (h 
*StorageHandler) StoreFileIntoP2P(ctx context.Context, file *files.File, typ int) (string, error) { + data, err := file.Bytes() + if err != nil { + return "", errors.Errorf("store file %s into p2p", file.Name()) + } + return h.StoreBytesIntoP2P(ctx, data, typ) +} + +// StoreBytesIntoP2P into P2P actual data +func (h *StorageHandler) StoreBytesIntoP2P(ctx context.Context, data []byte, typ int) (string, error) { + return h.P2PClient.Store(ctx, data, typ) +} + +// StoreBatch stores into P2P array of bytes arrays +func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) error { + val := ctx.Value(log.TaskIDKey) + taskID := "" + if val != nil { + taskID = fmt.Sprintf("%v", val) + } + log.WithContext(ctx).WithField("task_id", taskID).Info("task_id in storeList") + + return h.P2PClient.StoreBatch(ctx, list, typ, taskID) +} + +func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, taskID string) error { + h.semaphore <- struct{}{} // Acquire slot + defer func() { + <-h.semaphore // Release the semaphore slot + }() + + dir, err := h.store.GetDirectoryByTxID(taskID) + if err != nil { + return fmt.Errorf("error fetching symbols dir from rq DB: %w", err) + } + + fileMap, err := utils.ReadDirFilenames(dir) + if err != nil { + return fmt.Errorf("error reading file-names from symbols dir: %w", err) + } + + // Create a slice of keys from fileMap and sort it + keys := make([]string, 0, len(fileMap)) + for key := range fileMap { + keys = append(keys, key) + } + sort.Strings(keys) // Sort the keys alphabetically + + if len(keys) > loadSymbolsBatchSize { + // Calculate storeSymbolsPercent (10%) of the total keys, rounded up + requiredKeysCount := int(math.Ceil(float64(len(keys)) * storeSymbolsPercent / 100)) + + // Get the subset of keys (storeSymbolsPercent of the total) + if requiredKeysCount > len(keys) { + requiredKeysCount = len(keys) // Ensure we don't exceed the available keys count + } + keys = keys[:requiredKeysCount] + } + + // Iterate over sorted keys in batches + batchKeys := 
make(map[string][]byte) + count := 0 + + log.WithContext(ctx).WithField("count", len(keys)).Info("storing raptorQ symbols") + for _, key := range keys { + batchKeys[key] = nil + count++ + if count%loadSymbolsBatchSize == 0 { + if err := h.storeSymbolsInP2P(ctx, dir, batchKeys); err != nil { + return err + } + batchKeys = make(map[string][]byte) // Reset batchKeys after storing + } + } + + // Store any remaining symbols in the last batch + if len(batchKeys) > 0 { + if err := h.storeSymbolsInP2P(ctx, dir, batchKeys); err != nil { + return err + } + } + + if err := h.store.UpdateIsFirstBatchStored(h.TxID); err != nil { + return fmt.Errorf("error updating first batch stored flag in rq DB: %w", err) + } + log.WithContext(ctx).WithField("curr-time", time.Now().UTC()).WithField("count", len(keys)).Info("stored RaptorQ symbols") + + return nil +} + +func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, dir string, batchKeys map[string][]byte) error { + val := ctx.Value(log.TaskIDKey) + taskID := "" + if val != nil { + taskID = fmt.Sprintf("%v", val) + } + // Load symbols from the database for the current batch + log.WithContext(ctx).WithField("count", len(batchKeys)).Info("loading batch symbols") + loadedSymbols, err := utils.LoadSymbols(dir, batchKeys) + if err != nil { + return fmt.Errorf("load batch symbols from db: %w", err) + } + + log.WithContext(ctx).WithField("count", len(loadedSymbols)).Info("loaded batch symbols, storing now") + // Prepare batch for P2P storage + result := make([][]byte, len(loadedSymbols)) + i := 0 + for key, value := range loadedSymbols { + result[i] = value + loadedSymbols[key] = nil // Release the reference for faster memory cleanup + i++ + } + + // Store the loaded symbols in P2P + if err := h.P2PClient.StoreBatch(ctx, result, P2PDataRaptorQSymbol, taskID); err != nil { + return fmt.Errorf("store batch raptorq symbols in p2p: %w", err) + } + log.WithContext(ctx).WithField("count", len(loadedSymbols)).Info("stored batch 
symbols") + + if err := utils.DeleteSymbols(ctx, dir, batchKeys); err != nil { + return fmt.Errorf("delete batch symbols from db: %w", err) + } + log.WithContext(ctx).WithField("count", len(loadedSymbols)).Info("deleted batch symbols") + + return nil +} + +/* +// GenerateRaptorQSymbols calls RQ service to produce RQ Symbols +func (h *StorageHandler) GenerateRaptorQSymbols(ctx context.Context, data []byte, name string) (map[string][]byte, error) { + if h.RqClient == nil { + log.WithContext(ctx).Warnf("RQ Server is not initialized") + return nil, errors.Errorf("RQ Server is not initialized") + } + + b := backoff.NewExponentialBackOff() + b.MaxElapsedTime = 3 * time.Minute + b.InitialInterval = 200 * time.Millisecond + + var conn rqnode.Connection + if err := backoff.Retry(backoff.Operation(func() error { + var err error + conn, err = h.RqClient.Connect(ctx, h.rqAddress) + if err != nil { + return errors.Errorf("connect to raptorq service: %w", err) + } + + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry connect to raptorq service: %w", err) + } + defer func() { + if err := conn.Close(); err != nil { + log.WithContext(ctx).WithError(err).Error("error closing rq-connection") + } + }() + + rqService := conn.RaptorQ(&rqnode.Config{ + RqFilesDir: h.rqDir, + }) + + b.Reset() + + // encodeResp := &rqnode.EncodeResponse{} + if err := backoff.Retry(backoff.Operation(func() error { + var err error + // encodeResp, err = rqService.RQEncode(ctx, data, h.TxID, h.store) + _, err = rqService.Encode(ctx, rqnode.EncodeRequest{}) // FIXME : use the resp + // encodeResp = &encodeRes + if err != nil { + return errors.Errorf("create raptorq symbol from data %s: %w", name, err) + } + + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry do rqencode service: %w", err) + } + + return map[string][]byte{}, nil // FIXME : return proper symbols +} + +// GetRaptorQEncodeInfo calls RQ service to get Encoding info and list of RQIDs +func (h *StorageHandler) 
GetRaptorQEncodeInfo(ctx context.Context, + data []byte, num uint32, hash string, pastelID string, +) (encodeInfo *rqnode.EncodeResponse, err error) { + if h.RqClient == nil { + log.WithContext(ctx).Warnf("RQ Server is not initialized") + return nil, errors.Errorf("RQ Server is not initialized") + } + + b := backoff.NewExponentialBackOff() + b.MaxElapsedTime = 3 * time.Minute + b.InitialInterval = 500 * time.Millisecond + + var conn rqnode.Connection + if err := backoff.Retry(backoff.Operation(func() error { + var err error + conn, err = h.RqClient.Connect(ctx, h.rqAddress) + if err != nil { + return errors.Errorf("connect to raptorq service: %w", err) + } + + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry connect to raptorq service: %w", err) + } + defer func() { + if err := conn.Close(); err != nil { + log.WithContext(ctx).WithError(err).Error("error closing rq-connection") + } + }() + + rqService := conn.RaptorQ(&rqnode.Config{ + RqFilesDir: h.rqDir, + }) + + b.Reset() + if err := backoff.Retry(backoff.Operation(func() error { + var err error + // encodeInfo, err = rqService.EncodeMetaData(ctx, data, num, hash, pastelID) // TODO : remove + encodeI, err := rqService.EncodeMetaData(ctx, rqnode.EncodeMetadataRequest{ + Path: "", // FIXME + FilesNumber: num, + BlockHash: hash, + PastelId: pastelID, + }) + if err != nil { + return errors.Errorf("get raptorq encode info: %w", err) + } + encodeInfo = &encodeI + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry do encode info on raptorq service: %w", err) + } + + return encodeInfo, nil +} + +// ValidateRaptorQSymbolIDs calls RQ service to get Encoding info and list of RQIDs and compares them to the similar data received from WN +func (h *StorageHandler) ValidateRaptorQSymbolIDs(ctx context.Context, + data []byte, num uint32, hash string, pastelID string, + haveData []byte) error { + + if len(haveData) == 0 { + return errors.Errorf("no symbols identifiers") + } + + encodeInfo, err := 
h.GetRaptorQEncodeInfo(ctx, data, num, hash, pastelID) + if err != nil { + return err + } + + // scan return symbol Id files + filesMap, err := scanSymbolIDFiles(encodeInfo.Path) + if err != nil { + return errors.Errorf("scan symbol id files folder %s: %w", encodeInfo.Path, err) + } + + if len(filesMap) != int(num) { // FIXME : copies == num ? + return errors.Errorf("symbol id files count not match: expect %d, output %d", num, len(filesMap)) + } + + // pick just one file generated to compare + var gotFile, haveFile rqnode.RawSymbolIDFile + for _, v := range filesMap { + gotFile = v + break + } + + if err := json.Unmarshal(haveData, &haveFile); err != nil { + return errors.Errorf("decode raw rq file: %w", err) + } + + if err := utils.EqualStrList(gotFile.SymbolIdentifiers, haveFile.SymbolIdentifiers); err != nil { + return errors.Errorf("raptor symbol mismatched: %w", err) + } + return nil +} + +// scan symbol id files in "meta" folder, return map of file Ids & contents of file (as list of line) +func scanSymbolIDFiles(dirPath string) (map[string]rqnode.RawSymbolIDFile, error) { + filesMap := make(map[string]rqnode.RawSymbolIDFile) + + err := filepath.Walk(dirPath, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return errors.Errorf("scan a path %s: %w", path, err) + } + + if info.IsDir() { + // TODO - compare it to root + return nil + } + + fileID := filepath.Base(path) + + configFile, err := os.Open(path) + if err != nil { + return errors.Errorf("opening file: %s - err: %w", path, err) + } + defer configFile.Close() + + file := rqnode.RawSymbolIDFile{} + jsonParser := json.NewDecoder(configFile) + if err = jsonParser.Decode(&file); err != nil { + return errors.Errorf("parsing file: %s - err: %w", path, err) + } + + filesMap[fileID] = file + + return nil + }) + + if err != nil { + return nil, err + } + + return filesMap, nil +} + +*/ diff --git a/supernode/services/common/supernode_task.go b/supernode/services/common/supernode_task.go new 
file mode 100644 index 00000000..ac7184f0 --- /dev/null +++ b/supernode/services/common/supernode_task.go @@ -0,0 +1,61 @@ +package common + +import ( + "context" + "fmt" + + "github.com/LumeraProtocol/supernode/pkg/common/task" + "github.com/LumeraProtocol/supernode/pkg/common/task/state" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage/files" +) + +// TaskCleanerFunc pointer to func that removes artefacts +type TaskCleanerFunc func() + +// SuperNodeTask base "class" for Task +type SuperNodeTask struct { + task.Task + + LogPrefix string +} + +// RunHelper common code for Task runner +func (task *SuperNodeTask) RunHelper(ctx context.Context, clean TaskCleanerFunc) error { + ctx = task.context(ctx) + log.WithContext(ctx).Debug("Start task") + defer log.WithContext(ctx).Info("Task canceled") + defer task.Cancel() + + task.SetStatusNotifyFunc(func(status *state.Status) { + log.WithContext(ctx).WithField("status", status.String()).Debug("States updated") + }) + + defer clean() + + return task.RunAction(ctx) +} + +func (task *SuperNodeTask) context(ctx context.Context) context.Context { + return log.ContextWithPrefix(ctx, fmt.Sprintf("%s-%s", task.LogPrefix, task.ID())) +} + +// RemoveFile removes file from FS (TODO: move to gonode.common) +func (task *SuperNodeTask) RemoveFile(file *files.File) { + if file != nil { + log.Debugf("remove file: %s", file.Name()) + if err := file.Remove(); err != nil { + log.Debugf("remove file failed: %s", err.Error()) + } + } +} + +// NewSuperNodeTask returns a new Task instance. 
+func NewSuperNodeTask(logPrefix string) *SuperNodeTask { + snt := &SuperNodeTask{ + Task: task.New(StatusTaskStarted), + LogPrefix: logPrefix, + } + + return snt +} diff --git a/tests/system/cascade_test.go b/tests/system/cascade_test.go new file mode 100644 index 00000000..d0041ec5 --- /dev/null +++ b/tests/system/cascade_test.go @@ -0,0 +1,382 @@ +package system + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + "strings" + "testing" + "time" + + CascadePb "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/p2p" + "github.com/LumeraProtocol/supernode/pkg/net/credentials/alts/conn" + snClient "github.com/LumeraProtocol/supernode/supernode/node/supernode/client" + + "github.com/LumeraProtocol/supernode/p2p/kademlia" + "github.com/LumeraProtocol/supernode/pkg/lumera" + lumeraActionMod "github.com/LumeraProtocol/supernode/pkg/lumera/modules/action" + lumeraNodeMod "github.com/LumeraProtocol/supernode/pkg/lumera/modules/node" + lumeraSupernodeMod "github.com/LumeraProtocol/supernode/pkg/lumera/modules/supernode" + lumeraTxMod "github.com/LumeraProtocol/supernode/pkg/lumera/modules/tx" + ltc "github.com/LumeraProtocol/supernode/pkg/net/credentials" + "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/pkg/testutil" + "github.com/LumeraProtocol/supernode/supernode/cmd" + "github.com/LumeraProtocol/supernode/supernode/config" + CascadeActionServer "github.com/LumeraProtocol/supernode/supernode/node/action/server/cascade" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" + "github.com/LumeraProtocol/supernode/supernode/services/common" + + "github.com/LumeraProtocol/lumera/x/action/types" + snTypes "github.com/LumeraProtocol/lumera/x/supernode/types" + + types1 "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" + 
"github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const ( + basePort = 9000 + dataDirRoot = "./data/nodes" +) + +func TestSingleSupernodeSetup(t *testing.T) { + conn.RegisterALTSRecordProtocols() + defer conn.UnregisterALTSRecordProtocols() + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + + t.Log("Setting up a single Supernode with P2P and Cascade services...") + + kr := testutil.CreateTestKeyring() + + // Create test accounts + accountNames := make([]string, 0) + numP2PNodes := kademlia.Alpha + 1 + for i := 0; i < numP2PNodes; i++ { + accountNames = append(accountNames, fmt.Sprintf("supernode-%d", i)) + } + accountAddresses := testutil.SetupTestAccounts(t, kr, accountNames) + + var bootstrapNodeAddr string + req := &CascadePb.UploadInputDataRequest{ + ActionId: "test-action-id", + Filename: "test_file.txt", + DataHash: "abcdef1234567890abcdef1234567890", + RqMax: 10, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockLumeraClient := setupMockLumeraClient(ctrl, req.ActionId, accountAddresses[0]) + + p2pClients, rqStores := SetupTestP2PNodes(t, ctx, kr, mockLumeraClient, numP2PNodes, accountNames, accountAddresses) + + cascadeService := setupCascadeService(ctrl, mockLumeraClient, accountAddresses[0], p2pClients[0], rqStores[0]) + + grpcAddr := fmt.Sprintf("127.0.0.1:%d", basePort+100) + grpcServer := startSupernodeGRPCServer(t, cascadeService, grpcAddr) + grpcClient := connectToSupernodeGRPC(t, grpcAddr) + + var err error + supernodeConfig := getSupernodeConfig(0, accountNames, &bootstrapNodeAddr) + supernode, err := cmd.NewSupernode(ctx, supernodeConfig, kr, p2pClients[0], rqStores[0], mockLumeraClient) + require.NoError(t, err, "Failed to create supernode") + + go func() { + cascadeService.Run(ctx) + }() + + t.Log("Sending UploadInputData request to 
the Supernode...") + + client := grpcClient + + stream, err := client.Session(ctx) + require.NoError(t, err, "should successfully create session") + sessReq := &CascadePb.SessionRequest{ + IsPrimary: true, + } + + err = stream.Send(sessReq) + require.NoError(t, err, "should successfully send to stream") + + res, err := stream.Recv() + require.NoError(t, err, "should successfully rcv session") + + ctx = snClient.ContextWithMDSessID(ctx, res.SessID) + err = storeSymbolFilesToTempDir(res.SessID, rqStores[0]) + require.NoError(t, err) + + resp, err := client.UploadInputData(ctx, req) + require.NoError(t, err, "Failed to upload input data at Supernode") + require.True(t, resp.Success, "UploadInputData request at Supernode should succeed") + + t.Cleanup(func() { + cleanup(t, supernode, grpcServer) + }) +} + +// cleanup stops the supernode, GRPC server and cleans up data directories. +func cleanup(t *testing.T, supernode *cmd.Supernode, grpcServer *grpc.Server) { + t.Log("Cleaning up supernode, gRPC server, and data directories...") + + if supernode != nil { + //err := supernode.Stop(context.Background()) + //if err != nil { + // t.Logf("Failed to stop supernode: %v", err) + //} + } + if grpcServer != nil { + grpcServer.Stop() + } + + err := os.RemoveAll("./data") + if err != nil { + t.Logf("Failed to remove data directory: %v", err) + } else { + t.Log("✅ Data directories cleaned up successfully.") + } + + os.RemoveAll("./tmp") +} + +func setupCascadeService(ctrl *gomock.Controller, lumeraClient lumera.Client, accountAddr string, p2pClient *p2p.P2P, rqStore *rqstore.SQLiteRQStore) *cascade.CascadeService { + dataDir := filepath.Join(dataDirRoot, accountAddr) + + // fileStorage := fs.NewFileStorage(filepath.Join(dataDir, "storage")) + mockRaptorQ := raptorq.NewMockRaptorQ(ctrl) + mockRaptorQClient := raptorq.NewMockClientInterface(ctrl) + + mockRaptorQ.EXPECT().GenRQIdentifiersFiles( + gomock.Any(), // ctx + gomock.Any(), + ).Return(raptorq.GenRQIdentifiersFilesResponse{ + 
RQIDsIc: uint32(12345), + RQIDs: []string{"id1", "id2", "id3"}, + RQIDsFiles: [][]byte{[]byte("first"), []byte("second")}, + RQIDsFile: []byte("some_bytes_for_file"), + CreatorSignature: []byte(accountAddr), + RQEncodeParams: raptorq.EncoderParameters{Oti: []byte("some_encoded_value")}, + }, nil) + + return cascade.NewCascadeService( + &cascade.Config{ + Config: common.Config{ + SupernodeAccountAddress: accountAddr, + }, + RaptorQServiceAddress: "", + RqFilesDir: filepath.Join(dataDir, "rqfiles"), + NumberConnectedNodes: 1, + }, + lumeraClient, + nil, + *p2pClient, + mockRaptorQ, + mockRaptorQClient, + rqStore, + ) +} + +// Starts gRPC server for a Supernode +func startSupernodeGRPCServer(t *testing.T, service *cascade.CascadeService, address string) *grpc.Server { + t.Helper() + grpcServer := grpc.NewServer() + CascadePb.RegisterCascadeServiceServer(grpcServer, + CascadeActionServer.NewCascadeActionServer(service), + ) + + listener, err := net.Listen("tcp", address) + require.NoError(t, err, fmt.Sprintf("Failed to start gRPC listener on %s", address)) + + go func() { + if err := grpcServer.Serve(listener); err != nil { + t.Logf("gRPC server stopped: %v", err) + } + }() + time.Sleep(2 * time.Second) + return grpcServer +} + +func connectToSupernodeGRPC(t *testing.T, address string) CascadePb.CascadeServiceClient { + conn, err := grpc.Dial(address, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err, fmt.Sprintf("Failed to connect to gRPC server at %s", address)) + return CascadePb.NewCascadeServiceClient(conn) +} + +func setupMockLumeraClient(ctrl *gomock.Controller, actionID string, accAddress string) lumera.Client { + mockLumeraClient := lumera.NewMockClient(ctrl) + mockActionClient := lumeraActionMod.NewMockModule(ctrl) + mockNodeClient := lumeraNodeMod.NewMockModule(ctrl) + mockTxClient := lumeraTxMod.NewMockModule(ctrl) + mockSupernodeClient := lumeraSupernodeMod.NewMockModule(ctrl) + + 
mockLumeraClient.EXPECT().Action().Return(mockActionClient).AnyTimes() + mockLumeraClient.EXPECT().Node().Return(mockNodeClient).AnyTimes() + mockLumeraClient.EXPECT().Tx().Return(mockTxClient).AnyTimes() + mockLumeraClient.EXPECT().SuperNode().Return(mockSupernodeClient).AnyTimes() + + mockActionClient.EXPECT().GetAction(gomock.Any(), actionID).Return(&types.QueryGetActionResponse{ + Action: &types.Action{ + ActionID: actionID, + Creator: "test-action-creator", + BlockHeight: 100, + Metadata: &types.Metadata{ + MetadataType: &types.Metadata_CascadeMetadata{ + CascadeMetadata: &types.CascadeMetadata{ + DataHash: "abcdef1234567890abcdef1234567890", + FileName: "test_file.txt", + RqMax: 10, + RqIc: 5, + }, + }, + }, + State: types.ActionStateApproved, + }, + }, nil).AnyTimes() + + mockNodeClient.EXPECT().GetLatestBlock(gomock.Any()).Return(&cmtservice.GetLatestBlockResponse{ + BlockId: &types1.BlockID{ + Hash: []byte("latestblockhash"), + }, + SdkBlock: &cmtservice.Block{ + Header: cmtservice.Header{ + Height: 100, + }, + }, + }, nil).AnyTimes() + + mockSupernodeClient.EXPECT().GetTopSuperNodesForBlock(gomock.Any(), gomock.Any()).Return(&snTypes.QueryGetTopSuperNodesForBlockResponse{ + Supernodes: []*snTypes.SuperNode{ + { + SupernodeAccount: accAddress, + }, + }, + }, nil).AnyTimes() + + return mockLumeraClient +} + +func getSupernodeConfig(i int, accountNames []string, bootstrapNodeAddr *string) *config.Config { + bootstrapNodes := "" + if i > 0 { + bootstrapNodes = *bootstrapNodeAddr + } else { + *bootstrapNodeAddr = fmt.Sprintf("127.0.0.1:%s@%d", accountNames[i], basePort+i) + } + + return &config.Config{ + P2PConfig: config.P2PConfig{ + ListenAddress: "127.0.0.1", + Port: uint16(basePort + i), + DataDir: filepath.Join(dataDirRoot, accountNames[i]), + BootstrapNodes: bootstrapNodes, + }, + SupernodeConfig: config.SupernodeConfig{ + KeyName: accountNames[i], + }, + } +} + +// SetupTestP2PNodes now supports multiple nodes +func SetupTestP2PNodes(t *testing.T, ctx 
context.Context, kr keyring.Keyring, + lumeraC lumera.Client, numP2PNodes int, accountNames []string, accountAddresses []string, +) ([]*p2p.P2P, []*rqstore.SQLiteRQStore) { + var services []*p2p.P2P + var rqStores []*rqstore.SQLiteRQStore + + // Setup node addresses and their corresponding Lumera IDs + var nodeConfigs ltc.LumeraAddresses + for i := 0; i < numP2PNodes; i++ { + nodeConfigs = append(nodeConfigs, ltc.LumeraAddress{ + Identity: accountAddresses[i], + Host: "127.0.0.1", + Port: uint16(9000 + i), + }) + } + + for i, config := range nodeConfigs { + mockClient, err := testutil.NewMockLumeraClient(kr, accountAddresses) + require.NoError(t, err, "failed to create tendermint client") + + dataDir := fmt.Sprintf("./data/node%d", i) + err = os.MkdirAll(dataDir, 0755) + require.NoError(t, err, "failed to create data directory for node %d: %v", i, err) + + // Get all previous addresses to use as bootstrap addresses + bootstrapAddresses := make([]string, i) + for j := 0; j < i; j++ { + bootstrapAddresses[j] = nodeConfigs[j].String() + } + + p2pConfig := &p2p.Config{ + ListenAddress: config.Host, + Port: config.Port, + DataDir: dataDir, + ID: config.Identity, + BootstrapNodes: strings.Join(bootstrapAddresses, ","), + } + + rqStoreFile := filepath.Join(dataDir, "rqstore.db") + require.NoError(t, os.MkdirAll(filepath.Dir(rqStoreFile), 0755), "Failed to create rqstore directory for node %d", i) + + rqStore, err := rqstore.NewSQLiteRQStore(rqStoreFile) + require.NoError(t, err, "Failed to create rqstore for node %d", i) + + p2pClient, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil) + require.NoError(t, err, "Failed to create P2P client for node %d", i) + + go func() { + if err := p2pClient.Run(ctx); err != nil && err != context.Canceled { + t.Logf("P2P service for node %d failed: %v", i, err) + } + }() + + services = append(services, &p2pClient) + rqStores = append(rqStores, rqStore) + + // Give nodes time to start up and connect + time.Sleep(2 * 
time.Second) + } + + // Give extra time for all nodes to connect + time.Sleep(3 * time.Second) + + return services, rqStores +} + +func storeSymbolFilesToTempDir(taskID string, store rqstore.Store) error { + store.StoreSymbolDirectory(taskID, "./tmp") + + tempDirPath := filepath.Join("./tmp") + if err := os.MkdirAll(tempDirPath, 0755); err != nil { + return fmt.Errorf("failed to create temporary directory: %v", err) + } + + // Create temporary files + for i := 0; i < 3; i++ { // Example: Create 3 temp files + tempFilePath := filepath.Join(tempDirPath, fmt.Sprintf("tempfile_%d.txt", i)) + tempFile, err := os.Create(tempFilePath) + if err != nil { + return fmt.Errorf("failed to create temporary file: %v", err) + } + defer tempFile.Close() + + // Example: Write some data to the file + _, err = tempFile.WriteString("This is a sample text for temporary file.\n") + if err != nil { + return fmt.Errorf("failed to write to temporary file: %v", err) + } + } + + return nil +} diff --git a/tests/system/go.mod b/tests/system/go.mod index 7408152d..4b52e8e0 100644 --- a/tests/system/go.mod +++ b/tests/system/go.mod @@ -22,13 +22,15 @@ require ( github.com/stretchr/testify v1.10.0 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/grpc v1.70.0 // indirect + google.golang.org/grpc v1.70.0 ) require ( cosmossdk.io/math v1.4.0 + github.com/LumeraProtocol/lumera v0.4.3 github.com/LumeraProtocol/supernode v0.0.0-00010101000000-000000000000 github.com/cometbft/cometbft v0.38.15 + github.com/golang/mock v1.6.0 github.com/tidwall/gjson v1.14.2 github.com/tidwall/sjson v1.2.5 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 @@ -52,7 +54,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce // indirect - 
github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.11.3 // indirect @@ -73,6 +74,7 @@ require ( github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/badger/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/disintegration/imaging v1.6.2 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/fatih/color v1.15.0 // indirect @@ -115,9 +117,9 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/jmoiron/sqlx v1.4.0 // indirect - github.com/jpillora/longestcommon v0.0.0-20161227235612-adb9d91ee629 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.11 // indirect + github.com/kolesa-team/go-webp v1.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.9 // indirect @@ -167,6 +169,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/ratelimit v0.3.1 // indirect golang.org/x/crypto v0.33.0 // indirect + golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d // indirect golang.org/x/net v0.35.0 // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.30.0 // indirect diff --git a/tests/system/go.sum b/tests/system/go.sum index 4d99523b..bbce5ffe 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -1,7 +1,22 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go/auth v0.6.0 
h1:5x+d6b5zdezZ7gmLWD1m/xNjnaQ2YDhmIz/HH3doy1g= +cloud.google.com/go/auth v0.6.0/go.mod h1:b4acV+jLQDyjwm4OXHYjNvRi4jvGBzHWJRtJcy+2P4g= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute v1.27.1 h1:0WbBLIPNANheCRZ4h8QhgzjN53KMutbiVBOLtPiVzBU= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.1.9 h1:oSkYLVtVme29uGYrOcKcvJRht7cHJpYD09GM9JaR0TE= +cloud.google.com/go/iam v1.1.9/go.mod h1:Nt1eDWNYH9nGQg3d/mY7U1hvfGmsaG9o/kLGoLoLXjQ= +cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= +cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= cosmossdk.io/api v0.7.6 h1:PC20PcXy1xYKH2KU4RMurVoFjjKkCgYRbVAD4PdqUuY= cosmossdk.io/api v0.7.6/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= +cosmossdk.io/client/v2 v2.0.0-beta.5 h1:0LVv3nEByn//hFDIrYLs2WvsEU3HodOelh4SDHnA/1I= +cosmossdk.io/client/v2 v2.0.0-beta.5/go.mod h1:4p0P6o0ro+FizakJUYS9SeM94RNbv0thLmkHRw5o5as= cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= cosmossdk.io/core v0.11.1 h1:h9WfBey7NAiFfIcUhDVNS503I2P2HdZLebJlUIs8LPA= @@ -16,8 +31,18 @@ cosmossdk.io/math v1.4.0 h1:XbgExXFnXmF/CccPPEto40gOO7FpWu9yWNAZPN3nkNQ= cosmossdk.io/math v1.4.0/go.mod h1:O5PkD4apz2jZs4zqFdTr16e1dcaQCc5z6lkEnrrppuk= cosmossdk.io/store v1.1.1 h1:NA3PioJtWDVU7cHHeyvdva5J/ggyLDkyH0hGHl2804Y= cosmossdk.io/store v1.1.1/go.mod h1:8DwVTz83/2PSI366FERGbWSH7hL6sB7HbYp8bqksNwM= +cosmossdk.io/x/circuit v0.1.1 h1:KPJCnLChWrxD4jLwUiuQaf5mFD/1m7Omyo7oooefBVQ= +cosmossdk.io/x/circuit v0.1.1/go.mod 
h1:B6f/urRuQH8gjt4eLIXfZJucrbreuYrKh5CSjaOxr+Q= +cosmossdk.io/x/evidence v0.1.1 h1:Ks+BLTa3uftFpElLTDp9L76t2b58htjVbSZ86aoK/E4= +cosmossdk.io/x/evidence v0.1.1/go.mod h1:OoDsWlbtuyqS70LY51aX8FBTvguQqvFrt78qL7UzeNc= +cosmossdk.io/x/feegrant v0.1.1 h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= +cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= +cosmossdk.io/x/nft v0.1.1 h1:pslAVS8P5NkW080+LWOamInjDcq+v2GSCo+BjN9sxZ8= +cosmossdk.io/x/nft v0.1.1/go.mod h1:Kac6F6y2gsKvoxU+fy8uvxRTi4BIhLOor2zgCNQwVgY= cosmossdk.io/x/tx v0.13.7 h1:8WSk6B/OHJLYjiZeMKhq7DK7lHDMyK0UfDbBMxVmeOI= cosmossdk.io/x/tx v0.13.7/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= +cosmossdk.io/x/upgrade v0.1.4 h1:/BWJim24QHoXde8Bc64/2BSEB6W4eTydq0X/2f8+g38= +cosmossdk.io/x/upgrade v0.1.4/go.mod h1:9v0Aj+fs97O+Ztw+tG3/tp5JSlrmT7IcFhAebQHmOPo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= @@ -27,11 +52,17 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CosmWasm/wasmd v0.53.0 h1:kdaoAi20bIb4VCsxw9pRaT2g5PpIp82Wqrr9DRVN9ao= +github.com/CosmWasm/wasmd v0.53.0/go.mod h1:FJl/aWjdpGof3usAMFQpDe07Rkx77PUzp0cygFMOvtw= +github.com/CosmWasm/wasmvm/v2 v2.1.2 h1:GkJ5bAsRlLHfIQVg/FY1VHwLyBwlCjAhDea0B8L+e20= +github.com/CosmWasm/wasmvm/v2 v2.1.2/go.mod h1:bMhLQL4Yp9CzJi9A83aR7VO9wockOsSlZbT4ztOl6bg= github.com/DataDog/datadog-go v3.2.0+incompatible 
h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/LumeraProtocol/lumera v0.4.3 h1:q/FuT+JOLIpYdlunczRUr6K85r9Sn0lKvGltSrj4r6s= +github.com/LumeraProtocol/lumera v0.4.3/go.mod h1:MRqVY+f8edEBkDvpr4z2nJpglp3Qj1OUvjeWvrvIUSM= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -58,6 +89,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.44.224 h1:09CiaaF35nRmxrzWZ2uRq5v6Ghg/d2RiPjZnSgtt+RQ= +github.com/aws/aws-sdk-go v1.44.224/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= @@ -66,6 +99,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= @@ -86,8 +121,8 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= -github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= +github.com/bufbuild/protocompile v0.14.0 h1:z3DW4IvXE5G/uTOnSQn+qwQQxvhckkTWLS/0No/o7KU= +github.com/bufbuild/protocompile v0.14.0/go.mod h1:N6J1NYzkspJo3ZwyL4Xjvli86XOj1xq4qAasUFxGups= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -141,13 +176,11 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= -github.com/cosmos/cosmos-db v1.1.0 h1:KLHNVQ73h7vawXTpj9UJ7ZR2IXv51tsEHkQJJ9EBDzI= -github.com/cosmos/cosmos-db v1.1.0/go.mod h1:t7c4A6cfGdpUwwVxrQ0gQLeRQqGUBJu0yvE4F/26REg= +github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.50.11 h1:LxR1aAc8kixdrs3itO+3a44sFoc+vjxVAOyPFx22yjk= -github.com/cosmos/cosmos-sdk v0.50.11/go.mod h1:gt14Meok2IDCjbDtjwkbUcgVNEpUBDN/4hg9cCUtLgw= +github.com/cosmos/cosmos-sdk v0.50.12 h1:WizeD4K74737Gq46/f9fq+WjyZ1cP/1bXwVR3dvyp0g= github.com/cosmos/cosmos-sdk v0.50.12/go.mod h1:hrWEFMU1eoXqLJeE6VVESpJDQH67FS1nnMrQIjO2daw= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= @@ -158,12 +191,15 @@ github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fr github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0= github.com/cosmos/iavl v1.2.2 h1:qHhKW3I70w+04g5KdsdVSHRbFLgt3yY3qTMd4Xa4rC8= github.com/cosmos/iavl v1.2.2/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= +github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= +github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= +github.com/cosmos/ibc-go/v8 v8.5.1 h1:3JleEMKBjRKa3FeTKt4fjg22za/qygLBo7mDkoYTNBs= +github.com/cosmos/ibc-go/v8 v8.5.1/go.mod h1:P5hkAvq0Qbg0h18uLxDVA9q1kOJ0l36htMsskiNwXbo= 
github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= github.com/cosmos/keyring v1.2.0 h1:8C1lBP9xhImmIabyXW4c3vFjjLiBdGCmfLUfeZlV1Yo= github.com/cosmos/keyring v1.2.0/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= -github.com/cosmos/ledger-cosmos-go v0.13.3 h1:7ehuBGuyIytsXbd4MP43mLeoN2LTOEnk5nvue4rK+yM= -github.com/cosmos/ledger-cosmos-go v0.13.3/go.mod h1:HENcEP+VtahZFw38HZ3+LS3Iv5XV6svsnkk9vdJtLr8= +github.com/cosmos/ledger-cosmos-go v0.14.0 h1:WfCHricT3rPbkPSVKRH+L4fQGKYHuGOK9Edpel8TYpE= github.com/cosmos/ledger-cosmos-go v0.14.0/go.mod h1:E07xCWSBl3mTGofZ2QnL4cIUzMbbGVyik84QYKbX3RA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -189,8 +225,12 @@ github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkz github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= +github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/go-connections v0.5.0 
h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -349,10 +389,16 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.2 
h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -380,6 +426,10 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4= +github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -393,12 +443,16 @@ github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 
h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -434,14 +488,14 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jpillora/longestcommon v0.0.0-20161227235612-adb9d91ee629 h1:1dSBUfGlorLAua2CRx0zFN7kQsTpE2DQSmr7rrTNgY8= -github.com/jpillora/longestcommon v0.0.0-20161227235612-adb9d91ee629/go.mod 
h1:mb5nS4uRANwOJSZj8rlCWAfAcGi72GGMIXx+xGOjA7M= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -462,6 +516,8 @@ github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/kolesa-team/go-webp v1.0.4 h1:wQvU4PLG/X7RS0vAeyhiivhLRoxfLVRlDq4I3frdxIQ= +github.com/kolesa-team/go-webp v1.0.4/go.mod h1:oMvdivD6K+Q5qIIkVC2w4k2ZUnI1H+MyP7inwgWq9aA= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -514,6 +570,8 @@ github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= 
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= @@ -569,8 +627,8 @@ github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdM github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= -github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -670,6 +728,8 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0 github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shamaton/msgpack/v2 v2.2.0 h1:IP1m01pHwCrMa6ZccP9B3bqxEMKMSmMVAVKk54g3L/Y= +github.com/shamaton/msgpack/v2 v2.2.0/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -685,8 +745,7 @@ github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9yS github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= @@ -741,6 +800,8 @@ github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= @@ -748,6 +809,7 @@ github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7V github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= @@ -761,6 +823,10 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= @@ -776,8 +842,8 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= 
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -810,6 +876,9 @@ golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0J golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d h1:RNPAfi2nHY7C2srAV8A49jpsYr0ADedCk1wq6fTMTvs= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -821,6 +890,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -851,12 +921,13 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= -golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -864,6 +935,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -934,6 +1006,8 @@ golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -953,6 +1027,7 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -960,6 +1035,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.186.0 h1:n2OPp+PPXX0Axh4GuSsL5QL8xQCTb2oDwyzPnQvqUug= +google.golang.org/api v0.186.0/go.mod h1:hvRbBmgoje49RV3xqVXrmP6w93n6ehGgIVPYrGtBFFc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1056,6 +1133,8 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= +lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= diff --git 
a/tests/system/supernode.go b/tests/system/supernode.go deleted file mode 100644 index 6ce1b68c..00000000 --- a/tests/system/supernode.go +++ /dev/null @@ -1,122 +0,0 @@ -package system - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/LumeraProtocol/supernode/common/storage/rqstore" - "github.com/LumeraProtocol/supernode/p2p" - "github.com/LumeraProtocol/supernode/pkg/lumera" -) - -// SetupP2PServicesForNodes creates a P2P service for each Lumera node in the blockchain -// Each service uses the node's blockchain address as its Lumera ID -// Ports start at 8000 and increment sequentially -func SetupP2PServicesForNodes(t *testing.T, sut *SystemUnderTest, verbose bool, ctx context.Context) ([]p2p.Client, []*rqstore.SQLiteRQStore, error) { - t.Helper() - - // Get all node addresses - cli := NewLumeradCLI(t, sut, verbose) - var nodeAddresses []string - - t.Log("Retrieving addresses for all nodes:") - for i := 0; i < sut.nodesCount; i++ { - nodeName := fmt.Sprintf("node%d", i) - address := cli.GetKeyAddr(nodeName) - nodeAddresses = append(nodeAddresses, address) - t.Logf("Node %d (%s) address: %s", i, nodeName, address) - } - - // Setup P2P services using the node addresses - var services []p2p.Client - var rqStores []*rqstore.SQLiteRQStore - - t.Log("Setting up P2P services for each node...") - - // Create a Lumera client config - var nodeConfigs lumera.LumeraClientConfig - - // First pass: populate the node configs - for i, address := range nodeAddresses { - port := 8000 + i - nodeConfigs = append(nodeConfigs, struct { - Address string - LumeraID string - }{ - Address: fmt.Sprintf("127.0.0.1:%d", port), - LumeraID: address, - }) - } - - // Create the mock Lumera client with all node configs - mockClient := lumera.NewLumeraClient(nodeConfigs) - - // Second pass: create and start the actual P2P services - for i, address := range nodeAddresses { - port := 8000 + i - - // Create data directory for the node - dataDir 
:= fmt.Sprintf("./data/p2p_node%d", i) - if err := os.MkdirAll(dataDir, 0755); err != nil { - return nil, nil, fmt.Errorf("failed to create data directory for node %d: %v", i, err) - } - - // Collect addresses from previous nodes as bootstrap addresses - bootstrapAddresses := make([]string, i) - for j := 0; j < i; j++ { - bootstrapAddresses[j] = nodeConfigs[j].Address - } - - p2pConfig := &p2p.Config{ - ListenAddress: "127.0.0.1", - Port: port, - DataDir: dataDir, - ID: address, - BootstrapIPs: strings.Join(bootstrapAddresses, ","), - } - - // Initialize SQLite RQ store for each node - rqStoreFile := filepath.Join(dataDir, "rqstore.db") - if err := os.MkdirAll(filepath.Dir(rqStoreFile), 0755); err != nil { - return nil, nil, fmt.Errorf("failed to create rqstore directory for node %d: %v", i, err) - } - - rqStore, err := rqstore.NewSQLiteRQStore(rqStoreFile) - if err != nil { - return nil, nil, fmt.Errorf("failed to create rqstore for node %d: %v", i, err) - } - rqStores = append(rqStores, rqStore) - - t.Logf("Creating P2P service for node %d with address %s on port %d", i, address, port) - service, err := p2p.New(ctx, p2pConfig, mockClient, nil, rqStore, nil, nil) - if err != nil { - return nil, nil, fmt.Errorf("failed to create p2p service for node %d: %v", i, err) - } - - // Start P2P service in a goroutine - go func(nodeID int, rqStore *rqstore.SQLiteRQStore) { - defer rqStore.Close() - t.Logf("Starting P2P service for node %d", nodeID) - if err := service.Run(ctx); err != nil && err != context.Canceled { - t.Logf("Node %d P2P service failed: %v", nodeID, err) - } - }(i, rqStore) - - services = append(services, service) - - // Give node time to start up - time.Sleep(1 * time.Second) - } - - t.Log("All P2P services created and started") - - // Give extra time for all nodes to connect - time.Sleep(2 * time.Second) - - return services, rqStores, nil -} diff --git a/tests/system/supernode_sytem_test.go b/tests/system/supernode_sytem_test.go deleted file mode 100644 
index 2702067a..00000000 --- a/tests/system/supernode_sytem_test.go +++ /dev/null @@ -1,67 +0,0 @@ -//go:build system_test - -package system - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestSuperNode(t *testing.T) { - // Initialize and reset chain - sut.ResetChain(t) - - // Start the chain - sut.StartChain(t) - - // Create CLI helper - cli := NewLumeradCLI(t, sut, true) - - // Log node addresses - t.Log("Retrieving addresses for all nodes:") - for i := 0; i < sut.nodesCount; i++ { - nodeName := fmt.Sprintf("node%d", i) - address := cli.GetKeyAddr(nodeName) - t.Logf("Node %d (%s) address: %s", i, nodeName, address) - } - - // Create context for P2P operations - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - // Setup P2P services for all Lumera nodes - t.Log("Setting up P2P services...") - services, rqStores, err := SetupP2PServicesForNodes(t, sut, verbose, ctx) - require.NoError(t, err) - t.Logf("Successfully created %d P2P services", len(services)) - - defer func() { - // Cleanup RQ stores - t.Log("Closing RQ stores...") - for i, store := range rqStores { - t.Logf("Closing store %d", i) - store.Close() - } - - // Cleanup data directories - t.Log("Cleaning up test data directories...") - if err := os.RemoveAll("./data"); err != nil { - t.Logf("Warning: Failed to cleanup test data directory: %v", err) - } else { - t.Log("Successfully cleaned up test data directories") - } - }() - - // Wait for P2P network to stabilize - t.Log("Waiting for P2P network to stabilize...") - time.Sleep(10 * time.Second) - - // Very basic check - just log that services were created - t.Logf("Test complete: Successfully created %d P2P services", len(services)) - require.Equal(t, sut.nodesCount, len(services), "Should have one P2P service per node") -} diff --git a/tests/system/system.go b/tests/system/system.go index 2d7dcb38..db18f3f1 100644 --- a/tests/system/system.go +++ 
b/tests/system/system.go @@ -255,7 +255,7 @@ func (s *SystemUnderTest) AwaitUpgradeInfo(t *testing.T) { case err == nil: found = true case !os.IsNotExist(err): - t.Fatalf(err.Error()) + t.Fatal(err.Error()) } }) time.Sleep(s.blockTime / 2)