diff --git a/Makefile b/Makefile index fd9dfebf..2d300b9b 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,8 @@ -.PHONY: build build-release build-sncli build-sn-manager +.PHONY: build build-sncli build-sn-manager .PHONY: install-lumera setup-supernodes system-test-setup install-deps .PHONY: gen-cascade gen-supernode .PHONY: test-e2e test-unit test-integration test-system +.PHONY: release # Build variables VERSION ?= $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev") @@ -22,11 +23,8 @@ SN_MANAGER_LDFLAGS = -X main.Version=$(VERSION) \ build: @mkdir -p release - CGO_ENABLED=1 \ - GOOS=linux \ - GOARCH=amd64 \ - echo "Building supernode..." - go build \ + @echo "Building supernode..." + CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build \ -trimpath \ -ldflags="-s -w $(LDFLAGS)" \ -o release/supernode-linux-amd64 \ @@ -148,3 +146,30 @@ test-cascade: test-sn-manager: @echo "Running sn-manager e2e tests..." @cd tests/system && go test -tags=system_test -v -run '^TestSNManager' . + + + +# Release target: force-pushes the current branch and an auto-incremented version tag to the "upstream" remote. For testing only — point "upstream" at a dedicated test remote (or rename this target) before using it for real releases. +release: + @echo "Getting current branch..." + $(eval CURRENT_BRANCH := $(shell git branch --show-current)) + @echo "Current branch: $(CURRENT_BRANCH)" + + @echo "Getting latest tag..." + $(eval LATEST_TAG := $(shell git tag -l "v*" | sort -V | tail -n1)) + $(eval NEXT_TAG := $(shell \ + if [ -z "$(LATEST_TAG)" ]; then \ + echo "v2.5.0"; \ + else \ + echo "$(LATEST_TAG)" | sed 's/^v//' | awk -F. '{print "v" $$1 "." $$2 "." $$3+1}'; \ + fi)) + @echo "Next tag: $(NEXT_TAG)" + + @echo "Pushing branch to upstream..." + git push upstream $(CURRENT_BRANCH) -f + + @echo "Creating and pushing tag $(NEXT_TAG)..." 
+ git tag $(NEXT_TAG) + git push upstream $(NEXT_TAG) + + @echo "Release complete: $(NEXT_TAG) pushed to upstream" diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go index b8399095..c70a14dc 100644 --- a/gen/supernode/service.pb.go +++ b/gen/supernode/service.pb.go @@ -163,6 +163,255 @@ func (x *ServiceInfo) GetMethods() []string { return nil } +// Pprof message types +type GetPprofIndexRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetPprofIndexRequest) Reset() { + *x = GetPprofIndexRequest{} + mi := &file_supernode_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofIndexRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofIndexRequest) ProtoMessage() {} + +func (x *GetPprofIndexRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofIndexRequest.ProtoReflect.Descriptor instead. 
+func (*GetPprofIndexRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{3} +} + +type GetPprofIndexResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Html string `protobuf:"bytes,1,opt,name=html,proto3" json:"html,omitempty"` // HTML content for the pprof index page + Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` // Whether profiling is enabled +} + +func (x *GetPprofIndexResponse) Reset() { + *x = GetPprofIndexResponse{} + mi := &file_supernode_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofIndexResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofIndexResponse) ProtoMessage() {} + +func (x *GetPprofIndexResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofIndexResponse.ProtoReflect.Descriptor instead. 
+func (*GetPprofIndexResponse) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{4} +} + +func (x *GetPprofIndexResponse) GetHtml() string { + if x != nil { + return x.Html + } + return "" +} + +func (x *GetPprofIndexResponse) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +type GetPprofProfileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (optional, default 1) +} + +func (x *GetPprofProfileRequest) Reset() { + *x = GetPprofProfileRequest{} + mi := &file_supernode_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofProfileRequest) ProtoMessage() {} + +func (x *GetPprofProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofProfileRequest.ProtoReflect.Descriptor instead. 
+func (*GetPprofProfileRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{5} +} + +func (x *GetPprofProfileRequest) GetDebug() int32 { + if x != nil { + return x.Debug + } + return 0 +} + +type GetPprofCpuProfileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Duration in seconds (optional, default 30) +} + +func (x *GetPprofCpuProfileRequest) Reset() { + *x = GetPprofCpuProfileRequest{} + mi := &file_supernode_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofCpuProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofCpuProfileRequest) ProtoMessage() {} + +func (x *GetPprofCpuProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofCpuProfileRequest.ProtoReflect.Descriptor instead. 
+func (*GetPprofCpuProfileRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{6} +} + +func (x *GetPprofCpuProfileRequest) GetSeconds() int32 { + if x != nil { + return x.Seconds + } + return 0 +} + +type GetPprofProfileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Profile data (binary pprof format) + ContentType string `protobuf:"bytes,2,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` // Content type of the response + Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` // Whether profiling is enabled + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` // Error message if profiling is disabled +} + +func (x *GetPprofProfileResponse) Reset() { + *x = GetPprofProfileResponse{} + mi := &file_supernode_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPprofProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPprofProfileResponse) ProtoMessage() {} + +func (x *GetPprofProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPprofProfileResponse.ProtoReflect.Descriptor instead. 
+func (*GetPprofProfileResponse) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{7} +} + +func (x *GetPprofProfileResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *GetPprofProfileResponse) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *GetPprofProfileResponse) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *GetPprofProfileResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + var File_supernode_service_proto protoreflect.FileDescriptor var file_supernode_service_proto_rawDesc = []byte{ @@ -183,24 +432,108 @@ var file_supernode_service_proto_rawDesc = []byte{ 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, - 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, - 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, - 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x22, 0x16, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x45, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, + 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x74, 0x6d, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x68, 0x74, 0x6d, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, + 0x2e, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, + 0x35, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, 0x75, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 
0x73, 0x22, 0x80, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x32, 0xc5, 0x09, 0x0a, 0x10, 0x53, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, + 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 
0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x6f, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, + 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, + 0x70, 0x72, 0x6f, 0x66, 0x12, 0x77, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, + 0x48, 0x65, 0x61, 0x70, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x68, 0x65, 0x61, 0x70, 0x12, 0x81, 0x01, + 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x47, 0x6f, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x65, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 
0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, + 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x65, 0x12, 0x7b, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x41, 0x6c, 0x6c, + 0x6f, 0x63, 0x73, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x1c, 0x12, 0x1a, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, + 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x73, 0x12, 0x79, + 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, + 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, + 0x72, 0x6f, 0x66, 0x2f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x79, 0x0a, 0x0d, 0x47, 0x65, 0x74, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4d, 0x75, 
0x74, 0x65, 0x78, 0x12, 0x21, 0x2e, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, + 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x6d, + 0x75, 0x74, 0x65, 0x78, 0x12, 0x87, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x21, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, + 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, + 0x66, 0x2f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x80, + 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x12, 0x24, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, 0x75, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x70, 0x72, 0x6f, 0x66, 
0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x1d, 0x12, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -215,25 +548,46 @@ func file_supernode_service_proto_rawDescGZIP() []byte { return file_supernode_service_proto_rawDescData } -var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_supernode_service_proto_goTypes = []any{ - (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest - (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse - (*ServiceInfo)(nil), // 2: supernode.ServiceInfo - (*StatusRequest)(nil), // 3: supernode.StatusRequest - (*StatusResponse)(nil), // 4: supernode.StatusResponse + (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest + (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse + (*ServiceInfo)(nil), // 2: supernode.ServiceInfo + (*GetPprofIndexRequest)(nil), // 3: supernode.GetPprofIndexRequest + (*GetPprofIndexResponse)(nil), // 4: supernode.GetPprofIndexResponse + (*GetPprofProfileRequest)(nil), // 5: supernode.GetPprofProfileRequest + (*GetPprofCpuProfileRequest)(nil), // 6: supernode.GetPprofCpuProfileRequest + (*GetPprofProfileResponse)(nil), // 7: supernode.GetPprofProfileResponse + (*StatusRequest)(nil), // 8: supernode.StatusRequest + (*StatusResponse)(nil), // 9: 
supernode.StatusResponse } var file_supernode_service_proto_depIdxs = []int32{ - 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo - 3, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest - 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest - 4, // 3: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse - 1, // 4: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo + 8, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest + 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest + 3, // 3: supernode.SupernodeService.GetPprofIndex:input_type -> supernode.GetPprofIndexRequest + 5, // 4: supernode.SupernodeService.GetPprofHeap:input_type -> supernode.GetPprofProfileRequest + 5, // 5: supernode.SupernodeService.GetPprofGoroutine:input_type -> supernode.GetPprofProfileRequest + 5, // 6: supernode.SupernodeService.GetPprofAllocs:input_type -> supernode.GetPprofProfileRequest + 5, // 7: supernode.SupernodeService.GetPprofBlock:input_type -> supernode.GetPprofProfileRequest + 5, // 8: supernode.SupernodeService.GetPprofMutex:input_type -> supernode.GetPprofProfileRequest + 5, // 9: supernode.SupernodeService.GetPprofThreadcreate:input_type -> supernode.GetPprofProfileRequest + 6, // 10: supernode.SupernodeService.GetPprofProfile:input_type -> supernode.GetPprofCpuProfileRequest + 9, // 11: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse + 1, // 12: 
supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse + 4, // 13: supernode.SupernodeService.GetPprofIndex:output_type -> supernode.GetPprofIndexResponse + 7, // 14: supernode.SupernodeService.GetPprofHeap:output_type -> supernode.GetPprofProfileResponse + 7, // 15: supernode.SupernodeService.GetPprofGoroutine:output_type -> supernode.GetPprofProfileResponse + 7, // 16: supernode.SupernodeService.GetPprofAllocs:output_type -> supernode.GetPprofProfileResponse + 7, // 17: supernode.SupernodeService.GetPprofBlock:output_type -> supernode.GetPprofProfileResponse + 7, // 18: supernode.SupernodeService.GetPprofMutex:output_type -> supernode.GetPprofProfileResponse + 7, // 19: supernode.SupernodeService.GetPprofThreadcreate:output_type -> supernode.GetPprofProfileResponse + 7, // 20: supernode.SupernodeService.GetPprofProfile:output_type -> supernode.GetPprofProfileResponse + 11, // [11:21] is the sub-list for method output_type + 1, // [1:11] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_supernode_service_proto_init() } @@ -248,7 +602,7 @@ func file_supernode_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_supernode_service_proto_rawDesc, NumEnums: 0, - NumMessages: 3, + NumMessages: 8, NumExtensions: 0, NumServices: 1, }, diff --git a/gen/supernode/service.pb.gw.go b/gen/supernode/service.pb.gw.go index 326bccf3..9cdfafbc 100644 --- a/gen/supernode/service.pb.gw.go +++ b/gen/supernode/service.pb.gw.go @@ -87,13 +87,375 @@ func local_request_SupernodeService_ListServices_0(ctx context.Context, marshale } +func request_SupernodeService_GetPprofIndex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var 
protoReq GetPprofIndexRequest + var metadata runtime.ServerMetadata + + msg, err := client.GetPprofIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofIndex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofIndexRequest + var metadata runtime.ServerMetadata + + msg, err := server.GetPprofIndex(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofHeap_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofHeap_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofHeap(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofGoroutine_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofGoroutine(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofGoroutine_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofGoroutine(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofAllocs_0 = 
&utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofAllocs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofAllocs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofAllocs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofAllocs(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var 
metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofBlock_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofBlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofBlock_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofBlock(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofMutex_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := 
client.GetPprofMutex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofMutex_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofMutex(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofThreadcreate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofThreadcreate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) 
(proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofThreadcreate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofThreadcreate(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofCpuProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetPprofProfile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetPprofProfile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPprofCpuProfileRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_SupernodeService_GetPprofProfile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetPprofProfile(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterSupernodeServiceHandlerServer registers the http handlers for service SupernodeService to "mux". // UnaryRPC :call SupernodeServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSupernodeServiceHandlerFromEndpoint instead. func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SupernodeServiceServer) error { - mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_ListServices_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofIndex_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofHeap_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofHeap_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -104,7 +466,7 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetPprofGoroutine_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -112,11 +474,11 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser return } - forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_SupernodeService_GetPprofGoroutine_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_SupernodeService_GetPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -127,7 +489,7 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_SupernodeService_ListServices_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_SupernodeService_GetPprofAllocs_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -135,7 +497,99 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser return } - forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_SupernodeService_GetPprofAllocs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofBlock_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofBlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofMutex_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofMutex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofThreadcreate_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofThreadcreate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetPprofProfile_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofProfile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -220,6 +674,166 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser }) + mux.Handle("GET", pattern_SupernodeService_GetPprofIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofIndex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofHeap_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofHeap_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofGoroutine_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofGoroutine_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofAllocs_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofAllocs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofBlock_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofBlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofMutex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofMutex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofThreadcreate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofThreadcreate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetPprofProfile_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetPprofProfile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -227,10 +841,42 @@ var ( pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "pprof"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "heap"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "goroutine"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, 
[]string{"api", "v1", "debug", "pprof", "allocs"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "block"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "mutex"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofThreadcreate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "threadcreate"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SupernodeService_GetPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "pprof", "profile"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofIndex_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofHeap_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofGoroutine_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofAllocs_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofBlock_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofMutex_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofThreadcreate_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetPprofProfile_0 = runtime.ForwardResponseMessage ) diff --git a/gen/supernode/service.swagger.json b/gen/supernode/service.swagger.json index 0b515a20..6e04d0b9 100644 --- a/gen/supernode/service.swagger.json +++ b/gen/supernode/service.swagger.json @@ -16,6 +16,253 @@ "application/json" ], 
"paths": { + "/api/v1/debug/pprof": { + "get": { + "summary": "Profiling endpoints", + "operationId": "SupernodeService_GetPprofIndex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofIndexResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/allocs": { + "get": { + "operationId": "SupernodeService_GetPprofAllocs", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/block": { + "get": { + "operationId": "SupernodeService_GetPprofBlock", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/goroutine": { + "get": { + "operationId": "SupernodeService_GetPprofGoroutine", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + 
"schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/heap": { + "get": { + "operationId": "SupernodeService_GetPprofHeap", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/mutex": { + "get": { + "operationId": "SupernodeService_GetPprofMutex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/profile": { + "get": { + "operationId": "SupernodeService_GetPprofProfile", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "seconds", + "description": "Duration in seconds (optional, default 30)", + "in": 
"query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/pprof/threadcreate": { + "get": { + "operationId": "SupernodeService_GetPprofThreadcreate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeGetPprofProfileResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (optional, default 1)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, "/api/v1/services": { "get": { "operationId": "SupernodeService_ListServices", @@ -419,6 +666,41 @@ } } }, + "supernodeGetPprofIndexResponse": { + "type": "object", + "properties": { + "html": { + "type": "string", + "title": "HTML content for the pprof index page" + }, + "enabled": { + "type": "boolean", + "title": "Whether profiling is enabled" + } + } + }, + "supernodeGetPprofProfileResponse": { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "byte", + "title": "Profile data (binary pprof format)" + }, + "contentType": { + "type": "string", + "title": "Content type of the response" + }, + "enabled": { + "type": "boolean", + "title": "Whether profiling is enabled" + }, + "error": { + "type": "string", + "title": "Error message if profiling is disabled" + } + } + }, "supernodeListServicesResponse": { "type": "object", "properties": { diff --git a/gen/supernode/service_grpc.pb.go b/gen/supernode/service_grpc.pb.go index acb2e4c9..98db9323 100644 --- a/gen/supernode/service_grpc.pb.go +++ b/gen/supernode/service_grpc.pb.go @@ -19,8 +19,16 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" - 
SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" + SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" + SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" + SupernodeService_GetPprofIndex_FullMethodName = "/supernode.SupernodeService/GetPprofIndex" + SupernodeService_GetPprofHeap_FullMethodName = "/supernode.SupernodeService/GetPprofHeap" + SupernodeService_GetPprofGoroutine_FullMethodName = "/supernode.SupernodeService/GetPprofGoroutine" + SupernodeService_GetPprofAllocs_FullMethodName = "/supernode.SupernodeService/GetPprofAllocs" + SupernodeService_GetPprofBlock_FullMethodName = "/supernode.SupernodeService/GetPprofBlock" + SupernodeService_GetPprofMutex_FullMethodName = "/supernode.SupernodeService/GetPprofMutex" + SupernodeService_GetPprofThreadcreate_FullMethodName = "/supernode.SupernodeService/GetPprofThreadcreate" + SupernodeService_GetPprofProfile_FullMethodName = "/supernode.SupernodeService/GetPprofProfile" ) // SupernodeServiceClient is the client API for SupernodeService service. 
@@ -31,6 +39,15 @@ const ( type SupernodeServiceClient interface { GetStatus(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Profiling endpoints + GetPprofIndex(ctx context.Context, in *GetPprofIndexRequest, opts ...grpc.CallOption) (*GetPprofIndexResponse, error) + GetPprofHeap(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofGoroutine(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofAllocs(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofBlock(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofMutex(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofThreadcreate(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) + GetPprofProfile(ctx context.Context, in *GetPprofCpuProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) } type supernodeServiceClient struct { @@ -61,6 +78,86 @@ func (c *supernodeServiceClient) ListServices(ctx context.Context, in *ListServi return out, nil } +func (c *supernodeServiceClient) GetPprofIndex(ctx context.Context, in *GetPprofIndexRequest, opts ...grpc.CallOption) (*GetPprofIndexResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofIndexResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofIndex_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofHeap(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofHeap_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofGoroutine(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofGoroutine_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofAllocs(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofAllocs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofBlock(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofBlock_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofMutex(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofMutex_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofThreadcreate(ctx context.Context, in *GetPprofProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofThreadcreate_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetPprofProfile(ctx context.Context, in *GetPprofCpuProfileRequest, opts ...grpc.CallOption) (*GetPprofProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPprofProfileResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetPprofProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // SupernodeServiceServer is the server API for SupernodeService service. // All implementations must embed UnimplementedSupernodeServiceServer // for forward compatibility. 
@@ -69,6 +166,15 @@ func (c *supernodeServiceClient) ListServices(ctx context.Context, in *ListServi type SupernodeServiceServer interface { GetStatus(context.Context, *StatusRequest) (*StatusResponse, error) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Profiling endpoints + GetPprofIndex(context.Context, *GetPprofIndexRequest) (*GetPprofIndexResponse, error) + GetPprofHeap(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofGoroutine(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofAllocs(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofBlock(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofMutex(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofThreadcreate(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) + GetPprofProfile(context.Context, *GetPprofCpuProfileRequest) (*GetPprofProfileResponse, error) mustEmbedUnimplementedSupernodeServiceServer() } @@ -85,6 +191,30 @@ func (UnimplementedSupernodeServiceServer) GetStatus(context.Context, *StatusReq func (UnimplementedSupernodeServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") } +func (UnimplementedSupernodeServiceServer) GetPprofIndex(context.Context, *GetPprofIndexRequest) (*GetPprofIndexResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofIndex not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofHeap(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofHeap not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofGoroutine(context.Context, *GetPprofProfileRequest) 
(*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofGoroutine not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofAllocs(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofAllocs not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofBlock(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofBlock not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofMutex(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofMutex not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofThreadcreate(context.Context, *GetPprofProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofThreadcreate not implemented") +} +func (UnimplementedSupernodeServiceServer) GetPprofProfile(context.Context, *GetPprofCpuProfileRequest) (*GetPprofProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPprofProfile not implemented") +} func (UnimplementedSupernodeServiceServer) mustEmbedUnimplementedSupernodeServiceServer() {} func (UnimplementedSupernodeServiceServer) testEmbeddedByValue() {} @@ -142,6 +272,150 @@ func _SupernodeService_ListServices_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _SupernodeService_GetPprofIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: SupernodeService_GetPprofIndex_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofIndex(ctx, req.(*GetPprofIndexRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofHeap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofHeap(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofHeap_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofHeap(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofGoroutine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofGoroutine(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofGoroutine_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofGoroutine(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofAllocs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + 
return srv.(SupernodeServiceServer).GetPprofAllocs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofAllocs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofAllocs(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofBlock_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofBlock(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofMutex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofMutex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofMutex_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofMutex(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofThreadcreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(GetPprofProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofThreadcreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofThreadcreate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofThreadcreate(ctx, req.(*GetPprofProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetPprofProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPprofCpuProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetPprofProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetPprofProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetPprofProfile(ctx, req.(*GetPprofCpuProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + // SupernodeService_ServiceDesc is the grpc.ServiceDesc for SupernodeService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -157,6 +431,38 @@ var SupernodeService_ServiceDesc = grpc.ServiceDesc{ MethodName: "ListServices", Handler: _SupernodeService_ListServices_Handler, }, + { + MethodName: "GetPprofIndex", + Handler: _SupernodeService_GetPprofIndex_Handler, + }, + { + MethodName: "GetPprofHeap", + Handler: _SupernodeService_GetPprofHeap_Handler, + }, + { + MethodName: "GetPprofGoroutine", + Handler: _SupernodeService_GetPprofGoroutine_Handler, + }, + { + MethodName: "GetPprofAllocs", + Handler: _SupernodeService_GetPprofAllocs_Handler, + }, + { + MethodName: "GetPprofBlock", + Handler: _SupernodeService_GetPprofBlock_Handler, + }, + { + MethodName: "GetPprofMutex", + Handler: _SupernodeService_GetPprofMutex_Handler, + }, + { + MethodName: "GetPprofThreadcreate", + Handler: _SupernodeService_GetPprofThreadcreate_Handler, + }, + { + MethodName: "GetPprofProfile", + Handler: _SupernodeService_GetPprofProfile_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "supernode/service.proto", diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 62aa2768..808f9fd5 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -1660,9 +1660,8 @@ func (s *DHT) addKnownNodes(ctx context.Context, nodes []*Node, knownNodes map[s // If the success rate is below `minimumDataStoreSuccessRate`, an error is // returned alongside the measured rate and request count. 
func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, id string) error { - globalClosestContacts := make(map[string]*NodeList) knownNodes := make(map[string]*Node) - hashes := make([][]byte, len(values)) + storageMap := make(map[string][]int) { f := logtrace.Fields{logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), "len_nodes": len(s.ht.nodes()), logtrace.FieldRole: "client"} @@ -1673,19 +1672,11 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i } for i := 0; i < len(values); i++ { target, _ := utils.Blake3Hash(values[i]) - hashes[i] = target top6 := s.ht.closestContactsWithIncludingNode(Alpha, target, s.ignorelist.ToNodeList(), nil) - - globalClosestContacts[base58.Encode(target)] = top6 // log.WithContext(ctx).WithField("top 6", top6).Info("iterate batch store begin") s.addKnownNodes(ctx, top6.Nodes, knownNodes) - } - - storageMap := make(map[string][]int) // This will store the index of the data in the values array that needs to be stored to the node - for i := 0; i < len(hashes); i++ { - storageNodes := globalClosestContacts[base58.Encode(hashes[i])] - for j := 0; j < len(storageNodes.Nodes); j++ { - storageMap[string(storageNodes.Nodes[j].ID)] = append(storageMap[string(storageNodes.Nodes[j].ID)], i) + for j := 0; j < len(top6.Nodes); j++ { + storageMap[string(top6.Nodes[j].ID)] = append(storageMap[string(top6.Nodes[j].ID)], i) } } @@ -1731,8 +1722,6 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i } } - // per-node store metrics removed; logs retained - } if requests > 0 { @@ -1761,7 +1750,14 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i } func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[string]*Node, storageMap map[string][]int, typ int) chan *MessageWithError { - responses := make(chan *MessageWithError, len(nodes)) + buf := len(nodes) + if buf > macConcurrentNetworkStoreCalls { + 
buf = macConcurrentNetworkStoreCalls + } + if buf < 1 { + buf = 1 + } + responses := make(chan *MessageWithError, buf) maxStore := macConcurrentNetworkStoreCalls if ln := len(nodes); ln < maxStore { maxStore = ln diff --git a/p2p/kademlia/message.go b/p2p/kademlia/message.go index 4f778d1f..0ba949cf 100644 --- a/p2p/kademlia/message.go +++ b/p2p/kademlia/message.go @@ -167,39 +167,43 @@ type BatchGetValuesResponse struct { } // encode the message -func encode(message *Message) ([]byte, error) { +// encodePayload gob-encodes the message and returns only the payload bytes (no header). +// Callers can write the 8-byte header and payload separately to avoid duplicating large buffers. +func encodePayload(message *Message) ([]byte, error) { var buf bytes.Buffer - - encoder := gob.NewEncoder(&buf) - // encode the message with gob library - if err := encoder.Encode(message); err != nil { + enc := gob.NewEncoder(&buf) + if err := enc.Encode(message); err != nil { return nil, err } - // Check against absolute maximum first const maxMessageSize = 500 * 1024 * 1024 // 500MB absolute max if buf.Len() > maxMessageSize { return nil, errors.New("message size exceeds absolute maximum") } - if utils.BytesIntToMB(buf.Len()) > defaultMaxPayloadSize { return nil, errors.New("payload too big") } + return buf.Bytes(), nil +} +// encode builds the full on-wire message (header + payload) as a single slice. +// Legacy callers may use this; new code should prefer encodePayload and write header+payload separately. +func encode(message *Message) ([]byte, error) { + payload, err := encodePayload(message) + if err != nil { + return nil, err + } var header [8]byte - // prepare the header - binary.PutUvarint(header[:], uint64(buf.Len())) - - var data []byte - data = append(data, header[:]...) - data = append(data, buf.Bytes()...) - - return data, nil + binary.PutUvarint(header[:], uint64(len(payload))) + out := make([]byte, 0, len(header)+len(payload)) + out = append(out, header[:]...) 
+ out = append(out, payload...) + return out, nil } // decode the message func decode(conn io.Reader) (*Message, error) { - // read the header + // read the header (fixed 8 bytes carrying a uvarint length) header := make([]byte, 8) if _, err := io.ReadFull(conn, header); err != nil { return nil, err @@ -216,26 +220,23 @@ func decode(conn io.Reader) (*Message, error) { if length > maxMessageSize { return nil, errors.New("message size exceeds absolute maximum") } - if utils.BytesToMB(length) > defaultMaxPayloadSize { return nil, errors.New("payload too big") } - // read the message body - data := make([]byte, length) - if _, err := io.ReadFull(conn, data); err != nil { + // Stream-decode directly from the connection without allocating a full buffer + lr := &io.LimitedReader{R: conn, N: int64(length)} + dec := gob.NewDecoder(lr) + msg := &Message{} + if err := dec.Decode(msg); err != nil { return nil, err } - - // new a decoder - decoder := gob.NewDecoder(bytes.NewBuffer(data)) - // decode the message structure - message := &Message{} - if err = decoder.Decode(message); err != nil { - return nil, err + // If gob didn't consume exactly 'length' bytes, drain the remainder to keep the stream aligned + if lr.N > 0 { + // best-effort drain; ignore errors to avoid perturbing the handler + _, _ = io.CopyN(io.Discard, lr, lr.N) } - - return message, nil + return msg, nil } // BatchStoreDataRequest defines the request data for store data diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index ef542ee5..78b3f204 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -2,6 +2,7 @@ package kademlia import ( "context" + "encoding/binary" "fmt" "io" "net" @@ -130,13 +131,12 @@ func (s *Network) Stop(ctx context.Context) { } func (s *Network) encodeMesage(mesage *Message) ([]byte, error) { - // send the response to client - encoded, err := encode(mesage) + // Return gob-encoded payload only; caller writes header + payload to the conn + payload, err := 
encodePayload(mesage) if err != nil { return nil, errors.Errorf("encode response: %w", err) } - - return encoded, nil + return payload, nil } func (s *Network) handleFindNode(ctx context.Context, message *Message) (res []byte, err error) { @@ -478,6 +478,19 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) { // write the response (transport write failures counted as well) _ = conn.SetWriteDeadline(time.Now().Add(serverReadTimeout)) + // response currently carries gob-encoded payload only; prefix with 8-byte header + var hdr [8]byte + binary.PutUvarint(hdr[:], uint64(len(response))) + if _, err := conn.Write(hdr[:]); err != nil { + s.markTransportWrite(mt, err) + logtrace.Error(ctx, "Write failed", logtrace.Fields{ + logtrace.FieldModule: "p2p", + logtrace.FieldError: err.Error(), + "p2p-req-id": reqID, + "message-type": mt, + }) + return + } if _, err := conn.Write(response); err != nil { s.markTransportWrite(mt, err) logtrace.Error(ctx, "Write failed", logtrace.Fields{ @@ -645,26 +658,28 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes s.connPoolMtx.Unlock() } - // Encode once - data, err := encode(request) + // Encode once (payload only); write header and payload separately to avoid extra copies + payload, err := encodePayload(request) if err != nil { return nil, errors.Errorf("encode: %w", err) } + var header [8]byte + binary.PutUvarint(header[:], uint64(len(payload))) // Wrapper: lock whole RPC to prevent cross-talk; retry once on stale pooled socket if cw, ok := conn.(*connWrapper); ok { - return s.rpcOnceWrapper(ctx, cw, remoteAddr, data, timeout, request.MessageType) + return s.rpcOnceWrapper(ctx, cw, remoteAddr, header[:], payload, timeout, request.MessageType) } // Non-wrapper fallback: one stale retry - return s.rpcOnceNonWrapper(ctx, conn, remoteAddr, data, timeout, request.MessageType) + return s.rpcOnceNonWrapper(ctx, conn, remoteAddr, header[:], payload, timeout, request.MessageType) } // ---- 
retryable RPC helpers ------------------------------------------------- -func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAddr string, data []byte, timeout time.Duration, msgType int) (*Message, error) { +func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAddr string, header []byte, payload []byte, timeout time.Duration, msgType int) (*Message, error) { start := time.Now() - writeDL := calcWriteDeadline(timeout, len(data), 1.0) // target ~1 MB/s + writeDL := calcWriteDeadline(timeout, len(header)+len(payload), 1.0) // target ~1 MB/s retried := false for { @@ -677,7 +692,38 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd s.dropFromPool(remoteAddr, cw) return nil, errors.Errorf("set write deadline: %w", e) } - if _, e := cw.secureConn.Write(data); e != nil { + if _, e := cw.secureConn.Write(header); e != nil { + cw.mtx.Unlock() + if isStaleConnError(e) && !retried { + logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "remote": remoteAddr, + "message_type": msgType, + }) + s.dropFromPool(remoteAddr, cw) + fresh, derr := NewSecureClientConn(ctx, s.clientTC, remoteAddr) + if derr != nil { + logtrace.Error(ctx, "Retry redial failed (write)", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "remote": remoteAddr, + "message_type": msgType, + logtrace.FieldError: derr.Error(), + }) + return nil, errors.Errorf("re-dial after write: %w", derr) + } + s.addToPool(remoteAddr, fresh) + if nw, ok := fresh.(*connWrapper); ok { + cw = nw + retried = true + continue // retry whole RPC under the new wrapper + } + // Non-wrapper fallback retry + return s.rpcOnceNonWrapper(ctx, fresh, remoteAddr, header, payload, timeout, msgType) + } + s.dropFromPool(remoteAddr, cw) + return nil, errors.Errorf("conn write: %w", e) + } + if _, e := cw.secureConn.Write(payload); e != nil { cw.mtx.Unlock() if isStaleConnError(e) && !retried { 
logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ @@ -703,7 +749,7 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd continue // retry whole RPC under the new wrapper } // Non-wrapper fallback retry - return s.rpcOnceNonWrapper(ctx, fresh, remoteAddr, data, timeout, msgType) + return s.rpcOnceNonWrapper(ctx, fresh, remoteAddr, header, payload, timeout, msgType) } s.dropFromPool(remoteAddr, cw) return nil, errors.Errorf("conn write: %w", e) @@ -743,7 +789,7 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd retried = true continue // retry whole RPC } - return s.rpcOnceNonWrapper(ctx, fresh, remoteAddr, data, timeout, msgType) + return s.rpcOnceNonWrapper(ctx, fresh, remoteAddr, header, payload, timeout, msgType) } s.dropFromPool(remoteAddr, cw) return nil, errors.Errorf("conn read: %w", e) @@ -762,10 +808,10 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd } } -func (s *Network) rpcOnceNonWrapper(ctx context.Context, conn net.Conn, remoteAddr string, data []byte, timeout time.Duration, msgType int) (*Message, error) { +func (s *Network) rpcOnceNonWrapper(ctx context.Context, conn net.Conn, remoteAddr string, header []byte, payload []byte, timeout time.Duration, msgType int) (*Message, error) { start := time.Now() - sizeMB := float64(len(data)) / (1024.0 * 1024.0) // data is your gob-encoded message - throughputFloor := 8.0 // MB/s (~64 Mbps) + sizeMB := float64(len(header)+len(payload)) / (1024.0 * 1024.0) // total bytes to write + throughputFloor := 8.0 // MB/s (~64 Mbps) est := time.Duration(sizeMB / throughputFloor * float64(time.Second)) base := 1 * time.Second cushion := 5 * time.Second @@ -783,7 +829,33 @@ Retry: s.dropFromPool(remoteAddr, conn) return nil, errors.Errorf("set write deadline: %w", err) } - if _, err := conn.Write(data); err != nil { + if _, err := conn.Write(header); err != nil { + if isStaleConnError(err) && 
!retried { + logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "remote": remoteAddr, + "message_type": msgType, + }) + s.dropFromPool(remoteAddr, conn) + fresh, derr := NewSecureClientConn(ctx, s.clientTC, remoteAddr) + if derr != nil { + logtrace.Error(ctx, "Retry redial failed (write)", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "remote": remoteAddr, + "message_type": msgType, + logtrace.FieldError: derr.Error(), + }) + return nil, errors.Errorf("re-dial after write: %w", derr) + } + s.addToPool(remoteAddr, fresh) + conn = fresh + retried = true + goto Retry + } + s.dropFromPool(remoteAddr, conn) + return nil, errors.Errorf("conn write: %w", err) + } + if _, err := conn.Write(payload); err != nil { if isStaleConnError(err) && !retried { logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ logtrace.FieldModule: "p2p", diff --git a/p2p/p2p.go b/p2p/p2p.go index f9a5f74e..ee666e30 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -55,6 +55,13 @@ func (s *p2p) Run(ctx context.Context) error { for { select { case <-ctx.Done(): + // Ensure cleanup even if inner run() never started + if s.dht != nil { + s.dht.Stop(ctx) + } + if s.store != nil { + s.store.Close(ctx) + } return nil case <-time.After(5 * time.Second): if err := s.run(ctx); err != nil { diff --git a/pkg/cascadekit/serialize.go b/pkg/cascadekit/serialize.go new file mode 100644 index 00000000..21cef3d9 --- /dev/null +++ b/pkg/cascadekit/serialize.go @@ -0,0 +1,29 @@ +package cascadekit + +import ( + "encoding/base64" + "encoding/json" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" +) + +// LayoutJSON marshals a codec.Layout using the standard library encoder. 
+func LayoutJSON(layout codec.Layout) ([]byte, error) { + b, err := json.Marshal(layout) + if err != nil { + return nil, errors.Errorf("marshal layout: %w", err) + } + return b, nil +} + +// LayoutB64 returns base64(JSON(layout)) bytes using encoding/json for deterministic output. +func LayoutB64(layout codec.Layout) ([]byte, error) { + raw, err := LayoutJSON(layout) + if err != nil { + return nil, err + } + out := make([]byte, base64.StdEncoding.EncodedLen(len(raw))) + base64.StdEncoding.Encode(out, raw) + return out, nil +} diff --git a/pkg/codec/decode.go b/pkg/codec/decode.go index 348894e4..4d8ae5f7 100644 --- a/pkg/codec/decode.go +++ b/pkg/codec/decode.go @@ -173,9 +173,39 @@ func (rq *raptorQ) DecodeFromPrepared( } defer processor.Free() - // Write layout.json (idempotent) + // Write layout.json (idempotent). Important: encoder_parameters must be a JSON array, not base64 string. + // Go's encoding/json marshals []byte (aka []uint8) as base64 strings, which rq-go rejects. + // Use a wire struct that maps encoder_parameters to []int to produce a numeric array. 
+ type blockOnDisk struct { + BlockID int `json:"block_id"` + EncoderParameters []int `json:"encoder_parameters"` + OriginalOffset int64 `json:"original_offset"` + Size int64 `json:"size"` + Symbols []string `json:"symbols"` + Hash string `json:"hash"` + } + type layoutOnDisk struct { + Blocks []blockOnDisk `json:"blocks"` + } + var lod layoutOnDisk + lod.Blocks = make([]blockOnDisk, len(layout.Blocks)) + for i, b := range layout.Blocks { + // convert []uint8 (aka []byte) to []int so JSON encodes as numeric array + ep := make([]int, len(b.EncoderParameters)) + for j := range b.EncoderParameters { + ep[j] = int(b.EncoderParameters[j]) + } + lod.Blocks[i] = blockOnDisk{ + BlockID: b.BlockID, + EncoderParameters: ep, + OriginalOffset: b.OriginalOffset, + Size: b.Size, + Symbols: b.Symbols, + Hash: b.Hash, + } + } layoutPath := filepath.Join(ws.SymbolsDir, "layout.json") - layoutBytes, err := json.Marshal(layout) + layoutBytes, err := json.Marshal(lod) if err != nil { fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("marshal layout: %w", err) diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go index 469b32e8..6e27b020 100644 --- a/pkg/logtrace/log.go +++ b/pkg/logtrace/log.go @@ -62,7 +62,7 @@ func Setup(serviceName string) { // getLogLevel returns the log level from environment variable LOG_LEVEL func getLogLevel() zapcore.Level { - levelStr := "info" + levelStr := strings.ToLower(os.Getenv("LOG_LEVEL")) switch levelStr { case "debug": return zapcore.DebugLevel diff --git a/pkg/lumera/connection.go b/pkg/lumera/connection.go index 8abdc0f5..bcf093dd 100644 --- a/pkg/lumera/connection.go +++ b/pkg/lumera/connection.go @@ -97,7 +97,8 @@ func newGRPCConnection(ctx context.Context, rawAddr string) (Connection, error) firstConn = r.conn firstCand = r.cand winnerIndex = i - // Do not break yet; continue receiving to close any late winners. + // Cancel other attempts immediately; continue to drain results to avoid leaks. 
+ cancelAll() continue } // Close any non-winning connection to avoid leaks. @@ -114,11 +115,19 @@ func newGRPCConnection(ctx context.Context, rawAddr string) (Connection, error) if firstErr == nil { firstErr = fmt.Errorf("all connection attempts failed") } - return nil, firstErr + // Summarize attempted targets to help with diagnostics. + var attempts []string + for _, c := range cands { + scheme := "plaintext" + if c.useTLS { + scheme = "tls" + } + attempts = append(attempts, fmt.Sprintf("%s (%s)", c.target, scheme)) + } + return nil, fmt.Errorf("failed to connect to any Lumera endpoint; attempted: %s; last error: %v", strings.Join(attempts, ", "), firstErr) } - // Cancel remaining attempts; return the winner. - cancelAll() + // Remaining attempts were already canceled once we had a winner. // Info log showing final selected target and scheme scheme := "plaintext" @@ -257,11 +266,8 @@ func createGRPCConnection(ctx context.Context, hostPort string, creds credential case connectivity.Shutdown: conn.Close() return nil, fmt.Errorf("grpc connection is shutdown") - case connectivity.TransientFailure: - conn.Close() - return nil, fmt.Errorf("grpc connection is in transient failure") default: - // Idle or Connecting: wait for a state change or timeout + // For Idle, Connecting, and TransientFailure, wait for state change or timeout if !conn.WaitForStateChange(ctx, state) { conn.Close() return nil, fmt.Errorf("timeout waiting for grpc connection readiness") diff --git a/proto/supernode/service.proto b/proto/supernode/service.proto index 9725f84a..05d071d4 100644 --- a/proto/supernode/service.proto +++ b/proto/supernode/service.proto @@ -12,12 +12,61 @@ service SupernodeService { get: "/api/v1/status" }; } - + rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { option (google.api.http) = { get: "/api/v1/services" }; } + + // Profiling endpoints + rpc GetPprofIndex(GetPprofIndexRequest) returns (GetPprofIndexResponse) { + option (google.api.http) = { + 
get: "/api/v1/debug/pprof" + }; + } + + rpc GetPprofHeap(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/heap" + }; + } + + rpc GetPprofGoroutine(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/goroutine" + }; + } + + rpc GetPprofAllocs(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/allocs" + }; + } + + rpc GetPprofBlock(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/block" + }; + } + + rpc GetPprofMutex(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/mutex" + }; + } + + rpc GetPprofThreadcreate(GetPprofProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/threadcreate" + }; + } + + rpc GetPprofProfile(GetPprofCpuProfileRequest) returns (GetPprofProfileResponse) { + option (google.api.http) = { + get: "/api/v1/debug/pprof/profile" + }; + } } message ListServicesRequest {} @@ -32,3 +81,26 @@ message ServiceInfo { repeated string methods = 2; } +// Pprof message types +message GetPprofIndexRequest {} + +message GetPprofIndexResponse { + string html = 1; // HTML content for the pprof index page + bool enabled = 2; // Whether profiling is enabled +} + +message GetPprofProfileRequest { + int32 debug = 1; // Debug level (optional, default 1) +} + +message GetPprofCpuProfileRequest { + int32 seconds = 1; // Duration in seconds (optional, default 30) +} + +message GetPprofProfileResponse { + bytes data = 1; // Profile data (binary pprof format) + string content_type = 2; // Content type of the response + bool enabled = 3; // Whether profiling is enabled + string error = 4; // Error message if profiling is disabled +} + diff --git a/sdk/action/client.go 
b/sdk/action/client.go index 356f59f2..82ffa052 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -254,12 +254,12 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("read file: %w", err) } - // Build layout metadata only (no symbols). Supernodes will create symbols. - rq := codec.NewRaptorQCodec("") - layout, err := rq.CreateMetadata(ctx, filePath) - if err != nil { - return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq create metadata: %w", err) - } + // Build layout metadata only (no symbols). Supernodes will create symbols. + rq := codec.NewRaptorQCodec("") + layout, err := rq.CreateMetadata(ctx, filePath) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq create metadata: %w", err) + } // Derive `max` from chain params, then create signatures and index IDs paramsResp, err := c.lumeraClient.GetActionParams(ctx) diff --git a/sn-manager/cmd/check.go b/sn-manager/cmd/check.go index df20b2a5..2e6e971b 100644 --- a/sn-manager/cmd/check.go +++ b/sn-manager/cmd/check.go @@ -1,14 +1,14 @@ package cmd import ( - "fmt" - "strings" + "fmt" + "strings" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" - "github.com/spf13/cobra" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" + "github.com/spf13/cobra" ) var checkCmd = &cobra.Command{ @@ -32,8 +32,8 @@ func runCheck(cmd *cobra.Command, args []string) error { fmt.Println("Checking for updates...") - // Create GitHub client - 
client := github.NewClient(config.GitHubRepo) + // Create GitHub client + client := github.NewClient(config.GitHubRepo) // Get latest stable release release, err := client.GetLatestStableRelease() @@ -41,26 +41,26 @@ func runCheck(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to check for stable updates: %w", err) } - fmt.Printf("\nLatest release: %s\n", release.TagName) - fmt.Printf("Current version: %s\n", cfg.Updates.CurrentVersion) - // Report manager version and if it would update under the same policy - mv := strings.TrimSpace(appVersion) - if mv != "" && mv != "dev" && !strings.EqualFold(mv, "unknown") { - managerWould := utils.SameMajor(mv, release.TagName) && utils.CompareVersions(mv, release.TagName) < 0 - fmt.Printf("Manager version: %s (would update: %v)\n", mv, managerWould) - } else { - fmt.Printf("Manager version: %s\n", appVersion) - } + fmt.Printf("\nLatest release: %s\n", release.TagName) + fmt.Printf("Current version: %s\n", cfg.Updates.CurrentVersion) + // Report manager version and if it would update under the same policy + mv := strings.TrimSpace(appVersion) + if mv != "" && mv != "dev" && !strings.EqualFold(mv, "unknown") { + managerWould := utils.SameMajor(mv, release.TagName) && utils.CompareVersions(mv, release.TagName) < 0 + fmt.Printf("Manager version: %s (would update: %v)\n", mv, managerWould) + } else { + fmt.Printf("Manager version: %s\n", appVersion) + } - // Compare versions - cmp := utils.CompareVersions(cfg.Updates.CurrentVersion, release.TagName) + // Compare versions + cmp := utils.CompareVersions(cfg.Updates.CurrentVersion, release.TagName) if cmp < 0 { // Use the same logic as auto-updater to determine update eligibility - managerHome := config.GetManagerHome() - autoUpdater := updater.New(managerHome, cfg, appVersion) - wouldAutoUpdate := autoUpdater.ShouldUpdate(cfg.Updates.CurrentVersion, release.TagName) - + managerHome := config.GetManagerHome() + autoUpdater := updater.New(managerHome, cfg, 
appVersion, nil) + wouldAutoUpdate := autoUpdater.ShouldUpdate(cfg.Updates.CurrentVersion, release.TagName) + if wouldAutoUpdate { fmt.Printf("\nāœ“ Update available: %s → %s\n", cfg.Updates.CurrentVersion, release.TagName) fmt.Printf("Published: %s\n", release.PublishedAt.Format("2006-01-02 15:04:05")) diff --git a/sn-manager/cmd/start.go b/sn-manager/cmd/start.go index de03c6dd..f98ff737 100644 --- a/sn-manager/cmd/start.go +++ b/sn-manager/cmd/start.go @@ -121,12 +121,27 @@ func runStart(cmd *cobra.Command, args []string) error { } } + // gracefulManagerRestart stops SuperNode cleanly and exits the manager with code 3 so the service supervisor restarts it on the updated binary + gracefulManagerRestart := func() { + // Write stop marker so monitor won't auto-restart SuperNode + stopMarkerPath := filepath.Join(home, stopMarkerFile) + _ = os.WriteFile(stopMarkerPath, []byte("manager-update"), 0644) + + // Attempt graceful stop of SuperNode if running + if mgr.IsRunning() { + if err := mgr.Stop(); err != nil { + log.Printf("Failed to stop supernode: %v", err) + } + } + os.Exit(3) + } + // Mandatory version sync on startup: ensure both sn-manager and SuperNode // are at the latest stable release. This bypasses regular updater checks // (gateway idleness, same-major policy) to guarantee a consistent baseline. - // Runs once before monitoring begins. + // Runs once before monitoring begins. If manager updated, restart now. 
func() { - u := updater.New(home, cfg, appVersion) + u := updater.New(home, cfg, appVersion, gracefulManagerRestart) // Do not block startup on failures; best-effort sync defer func() { recover() }() u.ForceSyncToLatest(context.Background()) @@ -135,7 +150,7 @@ func runStart(cmd *cobra.Command, args []string) error { // Start auto-updater if enabled var autoUpdater *updater.AutoUpdater if cfg.Updates.AutoUpgrade { - autoUpdater = updater.New(home, cfg, appVersion) + autoUpdater = updater.New(home, cfg, appVersion, gracefulManagerRestart) autoUpdater.Start(ctx) } @@ -171,7 +186,15 @@ func runStart(cmd *cobra.Command, args []string) error { return nil case err := <-monitorDone: - // Monitor exited unexpectedly + // Monitor exited; ensure SuperNode is stopped as manager exits + if autoUpdater != nil { + autoUpdater.Stop() + } + if mgr.IsRunning() { + if stopErr := mgr.Stop(); stopErr != nil { + log.Printf("Failed to stop supernode: %v", stopErr) + } + } if err != nil { return fmt.Errorf("monitor error: %w", err) } diff --git a/sn-manager/internal/config/config.go b/sn-manager/internal/config/config.go index 87568580..f41a7f89 100644 --- a/sn-manager/internal/config/config.go +++ b/sn-manager/internal/config/config.go @@ -12,10 +12,19 @@ import ( const ( // ManagerHomeDir is the constant home directory for sn-manager ManagerHomeDir = ".sn-manager" - // GitHubRepo is the constant GitHub repository for supernode - GitHubRepo = "LumeraProtocol/supernode" + // defaultGitHubRepo is the default GitHub repository for supernode + defaultGitHubRepo = "LumeraProtocol/supernode" ) +// GitHubRepo is the GitHub repository for supernode and can be overridden via +// the SNM_GITHUB_REPO environment variable. 
+var GitHubRepo = func() string { + if v := os.Getenv("SNM_GITHUB_REPO"); v != "" { + return v + } + return defaultGitHubRepo +}() + // Config represents the sn-manager configuration type Config struct { Updates UpdateConfig `yaml:"updates"` @@ -81,7 +90,3 @@ func Save(cfg *Config, path string) error { return nil } - -// Validate checks if the configuration is valid -// Validate is kept for compatibility; no-op since interval was removed. -func (c *Config) Validate() error { return nil } diff --git a/sn-manager/internal/manager/manager.go b/sn-manager/internal/manager/manager.go index fd176121..06dacdb4 100644 --- a/sn-manager/internal/manager/manager.go +++ b/sn-manager/internal/manager/manager.go @@ -33,11 +33,6 @@ func New(homeDir string) (*Manager, error) { return nil, fmt.Errorf("failed to load config: %w", err) } - // Validate configuration - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("invalid config: %w", err) - } - return &Manager{ config: cfg, homeDir: homeDir, @@ -175,9 +170,9 @@ func (m *Manager) cleanup() { const ( DefaultShutdownTimeout = 30 * time.Second ProcessCheckInterval = 5 * time.Second - CrashBackoffDelay = 2 * time.Second - StopMarkerFile = ".stop_requested" - RestartMarkerFile = ".needs_restart" + CrashBackoffDelay = 2 * time.Second + StopMarkerFile = ".stop_requested" + RestartMarkerFile = ".needs_restart" ) // Monitor continuously supervises the SuperNode process @@ -190,7 +185,7 @@ func (m *Manager) Monitor(ctx context.Context) error { // Channel to monitor process exits processExitCh := make(chan error, 1) - + // Function to arm the process wait goroutine armProcessWait := func() { processExitCh = make(chan error, 1) @@ -262,7 +257,7 @@ func (m *Manager) Monitor(ctx context.Context) error { case <-ticker.C: // Periodic check for various conditions - + // 1. 
Check if stop marker was removed and we should start if !m.IsRunning() { if _, err := os.Stat(stopMarkerPath); os.IsNotExist(err) { @@ -281,16 +276,16 @@ func (m *Manager) Monitor(ctx context.Context) error { if _, err := os.Stat(restartMarkerPath); err == nil { if m.IsRunning() { log.Println("Binary update detected, restarting SuperNode...") - + // Remove the restart marker if err := os.Remove(restartMarkerPath); err != nil && !os.IsNotExist(err) { log.Printf("Warning: failed to remove restart marker: %v", err) } - + // Create temporary stop marker for clean restart tmpStopMarker := []byte("update") os.WriteFile(stopMarkerPath, tmpStopMarker, 0644) - + // Stop current process if err := m.Stop(); err != nil { log.Printf("Failed to stop for update: %v", err) @@ -299,15 +294,15 @@ func (m *Manager) Monitor(ctx context.Context) error { } continue } - + // Brief pause time.Sleep(CrashBackoffDelay) - + // Remove temporary stop marker if err := os.Remove(stopMarkerPath); err != nil && !os.IsNotExist(err) { log.Printf("Warning: failed to remove stop marker: %v", err) } - + // Start with new binary log.Println("Starting with updated binary...") if err := m.Start(ctx); err != nil { @@ -325,7 +320,7 @@ func (m *Manager) Monitor(ctx context.Context) error { m.mu.RLock() proc := m.process m.mu.RUnlock() - + if proc != nil { if err := proc.Signal(syscall.Signal(0)); err != nil { // Process is dead but not cleaned up @@ -344,4 +339,3 @@ func (m *Manager) Monitor(ctx context.Context) error { func (m *Manager) GetConfig() *config.Config { return m.config } - diff --git a/sn-manager/internal/updater/updater.go b/sn-manager/internal/updater/updater.go index b2e01e2d..548af07b 100644 --- a/sn-manager/internal/updater/updater.go +++ b/sn-manager/internal/updater/updater.go @@ -13,15 +13,18 @@ import ( "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" 
"github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" + "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" ) // Global updater timing constants const ( + // gatewayTimeout bounds the local gateway status probe + // gatewayTimeout = 15 * time.Second // updateCheckInterval is how often the periodic updater runs - updateCheckInterval = 10 * time.Minute + updateCheckInterval = 5 * time.Minute // forceUpdateAfter is the age threshold after a release is published - // beyond which updates are applied regardless of normal gates (policy only) - forceUpdateAfter = 30 * time.Minute + // beyond which updates are applied regardless of normal gates (idle, policy) + forceUpdateAfter = 5 * time.Minute ) type AutoUpdater struct { @@ -29,21 +32,32 @@ type AutoUpdater struct { homeDir string githubClient github.GithubClient versionMgr *version.Manager + gatewayURL string ticker *time.Ticker stopCh chan struct{} managerVersion string + // Gateway error backoff state + gwErrCount int + gwErrWindowStart time.Time + // Optional hook to handle manager update (restart) orchestration + onManagerUpdate func() } // Use protobuf JSON decoding for gateway responses (int64s encoded as strings) -func New(homeDir string, cfg *config.Config, managerVersion string) *AutoUpdater { +func New(homeDir string, cfg *config.Config, managerVersion string, onManagerUpdate func()) *AutoUpdater { + // Use the correct gateway endpoint with imported constants + gatewayURL := fmt.Sprintf("http://localhost:%d/api/v1/status", gateway.DefaultGatewayPort) + return &AutoUpdater{ - config: cfg, - homeDir: homeDir, - githubClient: github.NewClient(config.GitHubRepo), - versionMgr: version.NewManager(homeDir), - stopCh: make(chan struct{}), - managerVersion: managerVersion, + config: cfg, + homeDir: homeDir, + githubClient: github.NewClient(config.GitHubRepo), + versionMgr: version.NewManager(homeDir), + gatewayURL: gatewayURL, + stopCh: make(chan struct{}), + managerVersion: 
managerVersion, + onManagerUpdate: onManagerUpdate, } } @@ -121,6 +135,36 @@ func (u *AutoUpdater) ShouldUpdate(current, latest string) bool { // isGatewayIdle returns (idle, isError). When isError is true, // the gateway could not be reliably checked (network/error/invalid). // When isError is false and idle is false, the gateway is busy. +func (u *AutoUpdater) isGatewayIdle() (bool, bool) { + // client := &http.Client{Timeout: gatewayTimeout} + + // resp, err := client.Get(u.gatewayURL) + // if err != nil { + // log.Printf("Failed to check gateway status: %v", err) + // // Error contacting gateway + // return false, true + // } + // defer resp.Body.Close() + + // if resp.StatusCode != http.StatusOK { + // log.Printf("Gateway returned status %d, not safe to update", resp.StatusCode) + // return false, true + // } + + // var status pb.StatusResponse + // body, err := io.ReadAll(resp.Body) + // if err != nil { + // log.Printf("Failed to read gateway response: %v", err) + // return false, true + // } + // if err := protojson.Unmarshal(body, &status); err != nil { + // log.Printf("Failed to decode gateway response: %v", err) + // return false, true + // } + + // // TEMP: tasks are not available in the new gateway endpoint; skip busy-check + return true, false +} // checkAndUpdateCombined performs a single release check and, if needed, // downloads the release tarball once to update sn-manager and SuperNode. @@ -183,6 +227,20 @@ func (u *AutoUpdater) checkAndUpdateCombined(force bool) { return } + // Gate all updates (manager + SuperNode) on gateway idleness + // to avoid disrupting traffic during a self-update. 
+ if !force { + if idle, isErr := u.isGatewayIdle(); !idle { + if isErr { + // Track errors and possibly request a clean SuperNode restart + u.handleGatewayError() + } else { + log.Println("Gateway busy, deferring updates") + } + return + } + } + // Download the combined release tarball once tarURL, err := u.githubClient.GetReleaseTarballURL(latest) if err != nil { @@ -285,13 +343,55 @@ func (u *AutoUpdater) checkAndUpdateCombined(force bool) { // If manager updated, restart service after completing all work if managerUpdated { log.Printf("Self-update applied, restarting service...") - go func() { - time.Sleep(500 * time.Millisecond) + if u.onManagerUpdate != nil { + u.onManagerUpdate() + } else { + // Fallback: immediate process restart signal os.Exit(3) - }() + } } } // handleGatewayError increments an error counter in a rolling 5-minute window // and when the threshold is reached, requests a clean SuperNode restart by // writing the standard restart marker consumed by the manager monitor. 
+func (u *AutoUpdater) handleGatewayError() { + const ( + window = 5 * time.Minute + retries = 3 // attempts within window before restart + ) + now := time.Now() + if u.gwErrWindowStart.IsZero() { + u.gwErrWindowStart = now + u.gwErrCount = 1 + log.Printf("Gateway check error (1/%d); starting 5m observation window", retries) + return + } + + elapsed := now.Sub(u.gwErrWindowStart) + if elapsed >= window { + // Window elapsed; decide based on accumulated errors + if u.gwErrCount >= retries { + marker := filepath.Join(u.homeDir, ".needs_restart") + if err := os.WriteFile(marker, []byte("gateway-error-recover"), 0644); err != nil { + log.Printf("Failed to write restart marker after gateway errors: %v", err) + } else { + log.Printf("Gateway errors persisted (%d/%d) over >=5m; requesting SuperNode restart to recover gateway", u.gwErrCount, retries) + } + } + // Start a new window beginning now, with this error as the first hit + u.gwErrWindowStart = now + u.gwErrCount = 1 + return + } + + // Still within the window; increment and possibly announce threshold reached + u.gwErrCount++ + if u.gwErrCount < retries { + log.Printf("Gateway check error (%d/%d) within 5m; will retry", u.gwErrCount, retries) + return + } + // Threshold reached but do not restart until full window elapses + remaining := window - elapsed + log.Printf("Gateway error threshold reached; waiting %s before requesting SuperNode restart", remaining.Truncate(time.Second)) +} diff --git a/supernode/cascade/adaptors/p2p.go b/supernode/cascade/adaptors/p2p.go index 857df09a..4f39008e 100644 --- a/supernode/cascade/adaptors/p2p.go +++ b/supernode/cascade/adaptors/p2p.go @@ -18,10 +18,11 @@ import ( ) const ( - loadSymbolsBatchSize = 3000 + loadSymbolsBatchSize = 1000 storeSymbolsPercent = 18 storeBatchContextTimeout = 3 * time.Minute P2PDataRaptorQSymbol = 1 + firstPassMetaChunk = 32 ) type P2PService interface { @@ -92,6 +93,31 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action 
} logtrace.Info(ctx, "store: selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) logtrace.Info(ctx, "store: sending symbols", logtrace.Fields{"count": len(keys)}) + // Flush metadata first in small chunks; then proceed with symbol-only batches + if len(metadataFiles) > 0 { + for i := 0; i < len(metadataFiles); { + end := min(i+firstPassMetaChunk, len(metadataFiles)) + chunk := metadataFiles[i:end] + if i == 0 { + logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(chunk), "symbols_in_batch": 0, "payload_total": len(chunk)}) + } else { + logtrace.Info(ctx, "store: batch send (first-meta)", logtrace.Fields{"taskID": taskID, "metadata_count": len(chunk), "symbols_in_batch": 0, "payload_total": len(chunk)}) + } + bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) + err := p.p2p.StoreBatch(bctx, chunk, P2PDataRaptorQSymbol, taskID) + cancel() + if err != nil { + return 0, 0, fmt.Errorf("p2p store batch (first/meta): %w", err) + } + if i == 0 { + logtrace.Info(ctx, "store: batch ok (first)", logtrace.Fields{"taskID": taskID, "symbols_stored": 0}) + } else { + logtrace.Info(ctx, "store: batch ok (first-meta)", logtrace.Fields{"taskID": taskID, "symbols_stored": 0}) + } + i = end + } + metadataFiles = nil // ensure first symbol batch is not combined with metadata + } totalSymbols := 0 firstBatchProcessed := false for start := 0; start < len(keys); { diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go index 75315a09..ea444729 100644 --- a/supernode/cascade/helper.go +++ b/supernode/cascade/helper.go @@ -12,11 +12,10 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/cascade/adaptors" sdk 
"github.com/cosmos/cosmos-sdk/types" - json "github.com/json-iterator/go" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -79,11 +78,10 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode layout signature from base64", err, f) } - layoutJSON, err := json.Marshal(encodedMeta) + layoutB64, err := cascadekit.LayoutB64(encodedMeta) if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to marshal layout", err, f) + return codec.Layout{}, "", task.wrapErr(ctx, "failed to build layout base64", err, f) } - layoutB64 := utils.B64Encode(layoutJSON) if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) } diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index e1af0616..1b0b1de7 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -3,8 +3,6 @@ package cmd import ( "context" "fmt" - "net/http" - _ "net/http/pprof" "os" "os/signal" "path/filepath" @@ -145,30 +143,17 @@ The supernode will connect to the Lumera network and begin participating in the } // Create HTTP gateway server that directly calls the supernode server - gatewayServer, err := gateway.NewServer(appConfig.SupernodeConfig.Host, int(appConfig.SupernodeConfig.GatewayPort), supernodeServer) + // Pass chain ID for pprof configuration + gatewayServer, err := gateway.NewServerWithConfig( + appConfig.SupernodeConfig.Host, + int(appConfig.SupernodeConfig.GatewayPort), + supernodeServer, + appConfig.LumeraClientConfig.ChainID, + ) if err != nil { return fmt.Errorf("failed to create gateway server: %w", err) } - // Start profiling server on testnet only - isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), "testnet") - - if isTestnet && os.Getenv("INTEGRATION_TEST") != "true" { - profilingAddr := 
"0.0.0.0:8082" - - logtrace.Debug(ctx, "Starting profiling server", logtrace.Fields{ - "address": profilingAddr, - "chain_id": appConfig.LumeraClientConfig.ChainID, - "is_testnet": isTestnet, - }) - - go func() { - if err := http.ListenAndServe(profilingAddr, nil); err != nil { - logtrace.Error(ctx, "Profiling server error", logtrace.Fields{"error": err.Error()}) - } - }() - } - // Start the services using the standard runner and capture exit servicesErr := make(chan error, 1) go func() { servicesErr <- RunServices(ctx, grpcServer, cService, p2pService, gatewayServer) }() @@ -176,6 +161,7 @@ The supernode will connect to the Lumera network and begin participating in the // Set up signal handling for graceful shutdown sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(sigCh) // Wait for either a termination signal or service exit var triggeredBySignal bool @@ -195,20 +181,24 @@ The supernode will connect to the Lumera network and begin participating in the // Cancel context to signal all services cancel() - // Stop HTTP gateway and gRPC servers gracefully + // Stop HTTP gateway and gRPC servers without blocking shutdown shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) defer shutdownCancel() - if err := gatewayServer.Stop(shutdownCtx); err != nil { - logtrace.Warn(ctx, "Gateway shutdown warning", logtrace.Fields{"error": err.Error()}) - } + go func() { + if err := gatewayServer.Stop(shutdownCtx); err != nil { + logtrace.Warn(ctx, "Gateway shutdown warning", logtrace.Fields{"error": err.Error()}) + } + }() grpcServer.Close() - // Close Lumera client (preserve original log messages) + // Close Lumera client without blocking shutdown logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{}) - if err := lumeraClient.Close(); err != nil { - logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{"error": err.Error()}) - } + go func() { + if err := 
lumeraClient.Close(); err != nil { + logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{"error": err.Error()}) + } + }() // If we triggered shutdown by signal, wait for services to drain if triggeredBySignal { diff --git a/supernode/config.yml b/supernode/config.yml index 3bbf8b7e..35d888a3 100644 --- a/supernode/config.yml +++ b/supernode/config.yml @@ -2,7 +2,9 @@ supernode: key_name: "mykey" # Account name for the supernode in keyring identity: "lumera1ccmw5plzuldntum2rz6kq6uq346vtrhrvwfzsa" # Identity of the supernode, lumera address + # You can set either 'host' (preferred) or 'ip_address' (legacy alias). host: "0.0.0.0" + # ip_address: "0.0.0.0" port: 4444 # Keyring Configuration diff --git a/supernode/config/config.go b/supernode/config/config.go index e3910ac2..d655391c 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -5,15 +5,18 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "gopkg.in/yaml.v3" ) type SupernodeConfig struct { - KeyName string `yaml:"key_name"` - Identity string `yaml:"identity"` - Host string `yaml:"host"` + KeyName string `yaml:"key_name"` + Identity string `yaml:"identity"` + Host string `yaml:"host"` + // IPAddress is an accepted alias for Host to support older configs + IPAddress string `yaml:"ip_address,omitempty"` Port uint16 `yaml:"port"` GatewayPort uint16 `yaml:"gateway_port,omitempty"` } @@ -127,6 +130,15 @@ func LoadConfig(filename string, baseDir string) (*Config, error) { return nil, fmt.Errorf("error parsing config file: %w", err) } + // Support both 'host' and legacy 'ip_address' fields. If 'host' is empty + // and 'ip_address' is provided, use it as the host value. 
+ if strings.TrimSpace(config.SupernodeConfig.Host) == "" && strings.TrimSpace(config.SupernodeConfig.IPAddress) != "" { + config.SupernodeConfig.Host = strings.TrimSpace(config.SupernodeConfig.IPAddress) + logtrace.Debug(ctx, "Using ip_address as host", logtrace.Fields{ + "ip_address": config.SupernodeConfig.IPAddress, + }) + } + // Set the base directory config.BaseDir = baseDir diff --git a/supernode/status/service.go b/supernode/status/service.go index 1745b0d3..553f7e2d 100644 --- a/supernode/status/service.go +++ b/supernode/status/service.go @@ -32,6 +32,14 @@ func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client return &SupernodeStatusService{metrics: NewMetricsCollector(), storagePaths: []string{"/"}, startTime: time.Now(), p2pService: p2pService, lumeraClient: lumeraClient, config: cfg} } +// GetChainID returns the chain ID from the configuration +func (s *SupernodeStatusService) GetChainID() string { + if s.config != nil { + return s.config.LumeraClientConfig.ChainID + } + return "" +} + // GetStatus returns the current system status including optional P2P info func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (*pb.StatusResponse, error) { fields := logtrace.Fields{logtrace.FieldMethod: "GetStatus", logtrace.FieldModule: "SupernodeStatusService"} diff --git a/supernode/transport/gateway/server.go b/supernode/transport/gateway/server.go index 7e17e238..e306539a 100644 --- a/supernode/transport/gateway/server.go +++ b/supernode/transport/gateway/server.go @@ -5,7 +5,10 @@ import ( "fmt" "net" "net/http" + _ "net/http/pprof" + "os" "strconv" + "strings" "time" "github.com/grpc-ecosystem/grpc-gateway/runtime" @@ -23,6 +26,8 @@ type Server struct { port int server *http.Server supernodeServer pb.SupernodeServiceServer + chainID string + pprofEnabled bool } // NewServer creates a new HTTP gateway server that directly calls the service @@ -44,6 +49,29 @@ func NewServer(ipAddress string, port int, 
supernodeServer pb.SupernodeServiceSe }, nil } +// NewServerWithConfig creates a new HTTP gateway server with additional configuration +func NewServerWithConfig(ipAddress string, port int, supernodeServer pb.SupernodeServiceServer, chainID string) (*Server, error) { + if supernodeServer == nil { + return nil, fmt.Errorf("supernode server is required") + } + + // Use default port if not specified + if port == 0 { + port = DefaultGatewayPort + } + + // Determine if pprof should be enabled + pprofEnabled := strings.Contains(strings.ToLower(chainID), "testnet") || os.Getenv("ENABLE_PPROF") == "true" + + return &Server{ + ipAddress: ipAddress, + port: port, + supernodeServer: supernodeServer, + chainID: chainID, + pprofEnabled: pprofEnabled, + }, nil +} + // Run starts the HTTP gateway server (implements service interface) func (s *Server) Run(ctx context.Context) error { // Create gRPC-Gateway mux with custom JSON marshaler options @@ -69,6 +97,28 @@ func (s *Server) Run(ctx context.Context) error { // Register Swagger endpoints httpMux.HandleFunc("/swagger.json", s.serveSwaggerJSON) httpMux.HandleFunc("/swagger-ui/", s.serveSwaggerUI) + + // Register pprof endpoints (enabled on testnet chains, or when ENABLE_PPROF=true) + if s.pprofEnabled { + httpMux.HandleFunc("/debug/pprof/", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/cmdline", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/profile", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/symbol", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/trace", s.pprofHandler) + // Register specific pprof profiles + httpMux.HandleFunc("/debug/pprof/allocs", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/block", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/goroutine", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/heap", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/mutex", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/threadcreate", s.pprofHandler) + + logtrace.Debug(ctx, "Pprof endpoints enabled on gateway", logtrace.Fields{ 
+ "chain_id": s.chainID, + "port": s.port, + }) + } + httpMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/" { http.Redirect(w, r, "/swagger-ui/", http.StatusFound) @@ -87,8 +137,9 @@ func (s *Server) Run(ctx context.Context) error { } logtrace.Debug(ctx, "Starting HTTP gateway server", logtrace.Fields{ - "address": s.ipAddress, - "port": s.port, + "address": s.ipAddress, + "port": s.port, + "pprof_enabled": s.pprofEnabled, }) // Start server @@ -124,3 +175,19 @@ func (s *Server) corsMiddleware(h http.Handler) http.Handler { h.ServeHTTP(w, r) }) } + +// pprofHandler proxies requests to the pprof handlers +func (s *Server) pprofHandler(w http.ResponseWriter, r *http.Request) { + // Check if pprof is enabled + if !s.pprofEnabled { + http.Error(w, "Profiling is not enabled", http.StatusForbidden) + return + } + + // Get the default pprof handler and serve + if handler, pattern := http.DefaultServeMux.Handler(r); pattern != "" { + handler.ServeHTTP(w, r) + } else { + http.NotFound(w, r) + } +} diff --git a/supernode/transport/gateway/swagger.json b/supernode/transport/gateway/swagger.json index e6857ae0..0a40a447 100644 --- a/supernode/transport/gateway/swagger.json +++ b/supernode/transport/gateway/swagger.json @@ -30,6 +30,22 @@ } } } + }, + "/api/v1/services": { + "get": { + "summary": "List services", + "description": "Returns available gRPC services and their methods/streams", + "responses": { + "200": { + "description": "Services response", + "content": { + "application/json": { + "schema": { "$ref": "#/components/schemas/ListServicesResponse" } + } + } + } + } + } } }, "components": { @@ -40,6 +56,23 @@ "version": { "type": "string" }, "uptimeSeconds": { "type": "integer" } } + }, + "ListServicesResponse": { + "type": "object", + "properties": { + "services": { + "type": "array", + "items": { "$ref": "#/components/schemas/ServiceInfo" } + }, + "count": { "type": "integer" } + } + }, + "ServiceInfo": { + "type": "object", + 
"properties": { + "name": { "type": "string" }, + "methods": { "type": "array", "items": { "type": "string" } } + } } } } diff --git a/supernode/transport/grpc/status/handler.go b/supernode/transport/grpc/status/handler.go index 7c9c22bf..4e120279 100644 --- a/supernode/transport/grpc/status/handler.go +++ b/supernode/transport/grpc/status/handler.go @@ -4,6 +4,7 @@ import ( "context" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + pbcascade "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" statussvc "github.com/LumeraProtocol/supernode/v2/supernode/status" ) @@ -22,3 +23,31 @@ func NewSupernodeServer(statusService *statussvc.SupernodeStatusService) *Supern func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { return s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) } + +// ListServices implements SupernodeService.ListServices +func (s *SupernodeServer) ListServices(ctx context.Context, _ *pb.ListServicesRequest) (*pb.ListServicesResponse, error) { + // Describe available services and methods/streams exposed by this node + var services []*pb.ServiceInfo + + // SupernodeService methods + var supernodeMethods []string + for _, m := range pb.SupernodeService_ServiceDesc.Methods { + supernodeMethods = append(supernodeMethods, m.MethodName) + } + services = append(services, &pb.ServiceInfo{ + Name: pb.SupernodeService_ServiceDesc.ServiceName, + Methods: supernodeMethods, + }) + + // CascadeService streams (surface stream names as methods for discovery) + var cascadeMethods []string + for _, st := range pbcascade.CascadeService_ServiceDesc.Streams { + cascadeMethods = append(cascadeMethods, st.StreamName) + } + services = append(services, &pb.ServiceInfo{ + Name: pbcascade.CascadeService_ServiceDesc.ServiceName, + Methods: cascadeMethods, + }) + + return &pb.ListServicesResponse{Services: services, Count: int32(len(services))}, nil +} diff --git 
a/supernode/transport/grpc/status/pprof_handlers.go b/supernode/transport/grpc/status/pprof_handlers.go new file mode 100644 index 00000000..4557e5f6 --- /dev/null +++ b/supernode/transport/grpc/status/pprof_handlers.go @@ -0,0 +1,223 @@ +package server + +import ( + "bytes" + "context" + "fmt" + "os" + "runtime" + "runtime/pprof" + "strings" + "time" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" +) + +// isPprofEnabled checks if pprof should be enabled based on chain ID or environment variable +func (s *SupernodeServer) isPprofEnabled() bool { + // Check if chain ID contains testnet + if s.statusService != nil && s.statusService.GetChainID() != "" { + if strings.Contains(strings.ToLower(s.statusService.GetChainID()), "testnet") { + return true + } + } + + // Check environment variable + return os.Getenv("ENABLE_PPROF") == "true" +} + +// GetPprofIndex returns the pprof index page +func (s *SupernodeServer) GetPprofIndex(ctx context.Context, req *pb.GetPprofIndexRequest) (*pb.GetPprofIndexResponse, error) { + if !s.isPprofEnabled() { + return &pb.GetPprofIndexResponse{ + Html: "", + Enabled: false, + }, nil + } + + // Generate a simple index page with links to available profiles + html := ` + + +Supernode Profiling + + + +

+<html>
+<head><title>Supernode Profiling</title></head>
+<body>
+<h1>Supernode Profiling</h1>
+<p>Available profiles:</p>
+<ul>
+ + +` + + return &pb.GetPprofIndexResponse{ + Html: html, + Enabled: true, + }, nil +} + +// GetPprofHeap returns the heap profile +func (s *SupernodeServer) GetPprofHeap(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("heap", req.GetDebug()) +} + +// GetPprofGoroutine returns the goroutine profile +func (s *SupernodeServer) GetPprofGoroutine(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("goroutine", req.GetDebug()) +} + +// GetPprofAllocs returns the allocations profile +func (s *SupernodeServer) GetPprofAllocs(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("allocs", req.GetDebug()) +} + +// GetPprofBlock returns the block profile +func (s *SupernodeServer) GetPprofBlock(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("block", req.GetDebug()) +} + +// GetPprofMutex returns the mutex profile +func (s *SupernodeServer) GetPprofMutex(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("mutex", req.GetDebug()) +} + +// GetPprofThreadcreate returns the threadcreate profile +func (s *SupernodeServer) GetPprofThreadcreate(ctx context.Context, req *pb.GetPprofProfileRequest) (*pb.GetPprofProfileResponse, error) { + return s.getPprofProfile("threadcreate", req.GetDebug()) +} + +// GetPprofProfile returns the CPU profile +func (s *SupernodeServer) GetPprofProfile(ctx context.Context, req *pb.GetPprofCpuProfileRequest) (*pb.GetPprofProfileResponse, error) { + if !s.isPprofEnabled() { + return &pb.GetPprofProfileResponse{ + Enabled: false, + Error: "Profiling is disabled. 
Enable on testnet or set ENABLE_PPROF=true", + }, nil + } + + seconds := req.GetSeconds() + if seconds <= 0 { + seconds = 30 // Default to 30 seconds + } + if seconds > 300 { + seconds = 300 // Cap at 5 minutes + } + + var buf bytes.Buffer + if err := pprof.StartCPUProfile(&buf); err != nil { + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: fmt.Sprintf("Failed to start CPU profile: %v", err), + }, nil + } + + // Profile for the specified duration + time.Sleep(time.Duration(seconds) * time.Second) + pprof.StopCPUProfile() + + return &pb.GetPprofProfileResponse{ + Data: buf.Bytes(), + ContentType: "application/octet-stream", + Enabled: true, + }, nil +} + +// getPprofProfile is a helper function to get various runtime profiles +func (s *SupernodeServer) getPprofProfile(profileType string, debug int32) (*pb.GetPprofProfileResponse, error) { + if !s.isPprofEnabled() { + return &pb.GetPprofProfileResponse{ + Enabled: false, + Error: "Profiling is disabled. Enable on testnet or set ENABLE_PPROF=true", + }, nil + } + + var buf bytes.Buffer + var contentType string + + // Get the appropriate profile + var p *pprof.Profile + switch profileType { + case "heap": + runtime.GC() // Force GC before heap profile + p = pprof.Lookup("heap") + contentType = "application/octet-stream" + case "goroutine": + p = pprof.Lookup("goroutine") + contentType = "application/octet-stream" + case "allocs": + p = pprof.Lookup("allocs") + contentType = "application/octet-stream" + case "block": + p = pprof.Lookup("block") + contentType = "application/octet-stream" + case "mutex": + p = pprof.Lookup("mutex") + contentType = "application/octet-stream" + case "threadcreate": + p = pprof.Lookup("threadcreate") + contentType = "application/octet-stream" + default: + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: fmt.Sprintf("Unknown profile type: %s", profileType), + }, nil + } + + if p == nil { + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: 
fmt.Sprintf("Profile %s not found", profileType), + }, nil + } + + // Write the profile to buffer + // If debug > 0, write in text format for human reading + if debug > 0 { + if err := p.WriteTo(&buf, int(debug)); err != nil { + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: fmt.Sprintf("Failed to write profile: %v", err), + }, nil + } + contentType = "text/plain" + } else { + // Write in binary pprof format + if err := p.WriteTo(&buf, 0); err != nil { + return &pb.GetPprofProfileResponse{ + Enabled: true, + Error: fmt.Sprintf("Failed to write profile: %v", err), + }, nil + } + } + + return &pb.GetPprofProfileResponse{ + Data: buf.Bytes(), + ContentType: contentType, + Enabled: true, + }, nil +} \ No newline at end of file diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index 2db7ad09..cc2e93bc 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -35,11 +35,15 @@ import ( // The test demonstrates how data flows through the Lumera system: // 1. Start services (blockchain, RaptorQ, supernode) // 2. Set up test accounts and keys -// 3. Create test data and process it through RaptorQ +// 3. Create test data and build RaptorQ metadata (no symbols) // 4. Sign the data and RQ identifiers // 5. Submit a CASCADE action request with proper metadata // 6. Execute the Cascade operation with the action ID // 7. Monitor task completion and verify results +// +// Memory efficiency: This test streams large files (original and downloaded) +// for hashing and size checks using io.Copy + sha256.New, avoiding full-file +// in-memory buffers. All existing logs are preserved unchanged. 
func TestCascadeE2E(t *testing.T) { // --------------------------------------- // Constants and Configuration Parameters @@ -239,7 +243,7 @@ func TestCascadeE2E(t *testing.T) { require.NoError(t, err, "Failed to initialize Lumera client") // --------------------------------------- - // Step 4: Create and prepare layout file for RaptorQ encoding + // Step 4: Create and prepare layout metadata for RaptorQ (no symbols) // --------------------------------------- t.Log("Step 4: Creating test file for RaptorQ encoding") diff --git a/tests/system/go.mod b/tests/system/go.mod index 8e1d8840..99bb1df9 100644 --- a/tests/system/go.mod +++ b/tests/system/go.mod @@ -95,7 +95,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.4 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.3 // indirect github.com/google/flatbuffers v1.12.1 // indirect diff --git a/tests/system/go.sum b/tests/system/go.sum index d00c5807..5737b819 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -806,7 +806,6 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= @@ -892,7 +891,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod 
h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -938,7 +936,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1031,7 +1028,6 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.31.0 
h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=