diff --git a/api/cosmos/poolrebalancer/v1/poolrebalancer.pulsar.go b/api/cosmos/poolrebalancer/v1/poolrebalancer.pulsar.go new file mode 100644 index 00000000..a30ca40f --- /dev/null +++ b/api/cosmos/poolrebalancer/v1/poolrebalancer.pulsar.go @@ -0,0 +1,2852 @@ +// Code generated by protoc-gen-go-pulsar. DO NOT EDIT. +package poolrebalancerv1 + +import ( + v1beta1 "cosmossdk.io/api/cosmos/base/v1beta1" + fmt "fmt" + runtime "github.com/cosmos/cosmos-proto/runtime" + _ "github.com/cosmos/gogoproto/gogoproto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + io "io" + reflect "reflect" + sync "sync" +) + +var ( + md_Params protoreflect.MessageDescriptor + fd_Params_pool_delegator_address protoreflect.FieldDescriptor + fd_Params_max_target_validators protoreflect.FieldDescriptor + fd_Params_rebalance_threshold_bp protoreflect.FieldDescriptor + fd_Params_max_ops_per_block protoreflect.FieldDescriptor + fd_Params_max_move_per_op protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_init() + md_Params = File_cosmos_poolrebalancer_v1_poolrebalancer_proto.Messages().ByName("Params") + fd_Params_pool_delegator_address = md_Params.Fields().ByName("pool_delegator_address") + fd_Params_max_target_validators = md_Params.Fields().ByName("max_target_validators") + fd_Params_rebalance_threshold_bp = md_Params.Fields().ByName("rebalance_threshold_bp") + fd_Params_max_ops_per_block = md_Params.Fields().ByName("max_ops_per_block") + fd_Params_max_move_per_op = md_Params.Fields().ByName("max_move_per_op") +} + +var _ protoreflect.Message = (*fastReflection_Params)(nil) + +type fastReflection_Params Params + +func (x *Params) ProtoReflect() protoreflect.Message { + return (*fastReflection_Params)(x) +} + +func (x *Params) 
slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_Params_messageType fastReflection_Params_messageType +var _ protoreflect.MessageType = fastReflection_Params_messageType{} + +type fastReflection_Params_messageType struct{} + +func (x fastReflection_Params_messageType) Zero() protoreflect.Message { + return (*fastReflection_Params)(nil) +} +func (x fastReflection_Params_messageType) New() protoreflect.Message { + return new(fastReflection_Params) +} +func (x fastReflection_Params_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_Params +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_Params) Descriptor() protoreflect.MessageDescriptor { + return md_Params +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_Params) Type() protoreflect.MessageType { + return _fastReflection_Params_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_Params) New() protoreflect.Message { + return new(fastReflection_Params) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_Params) Interface() protoreflect.ProtoMessage { + return (*Params)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. 
+// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_Params) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.PoolDelegatorAddress != "" { + value := protoreflect.ValueOfString(x.PoolDelegatorAddress) + if !f(fd_Params_pool_delegator_address, value) { + return + } + } + if x.MaxTargetValidators != uint32(0) { + value := protoreflect.ValueOfUint32(x.MaxTargetValidators) + if !f(fd_Params_max_target_validators, value) { + return + } + } + if x.RebalanceThresholdBp != uint32(0) { + value := protoreflect.ValueOfUint32(x.RebalanceThresholdBp) + if !f(fd_Params_rebalance_threshold_bp, value) { + return + } + } + if x.MaxOpsPerBlock != uint32(0) { + value := protoreflect.ValueOfUint32(x.MaxOpsPerBlock) + if !f(fd_Params_max_ops_per_block, value) { + return + } + } + if x.MaxMovePerOp != "" { + value := protoreflect.ValueOfString(x.MaxMovePerOp) + if !f(fd_Params_max_move_per_op, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. 
+func (x *fastReflection_Params) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.Params.pool_delegator_address": + return x.PoolDelegatorAddress != "" + case "cosmos.poolrebalancer.v1.Params.max_target_validators": + return x.MaxTargetValidators != uint32(0) + case "cosmos.poolrebalancer.v1.Params.rebalance_threshold_bp": + return x.RebalanceThresholdBp != uint32(0) + case "cosmos.poolrebalancer.v1.Params.max_ops_per_block": + return x.MaxOpsPerBlock != uint32(0) + case "cosmos.poolrebalancer.v1.Params.max_move_per_op": + return x.MaxMovePerOp != "" + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.Params")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.Params does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Params) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.Params.pool_delegator_address": + x.PoolDelegatorAddress = "" + case "cosmos.poolrebalancer.v1.Params.max_target_validators": + x.MaxTargetValidators = uint32(0) + case "cosmos.poolrebalancer.v1.Params.rebalance_threshold_bp": + x.RebalanceThresholdBp = uint32(0) + case "cosmos.poolrebalancer.v1.Params.max_ops_per_block": + x.MaxOpsPerBlock = uint32(0) + case "cosmos.poolrebalancer.v1.Params.max_move_per_op": + x.MaxMovePerOp = "" + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.Params")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.Params does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. 
+// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_Params) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.poolrebalancer.v1.Params.pool_delegator_address": + value := x.PoolDelegatorAddress + return protoreflect.ValueOfString(value) + case "cosmos.poolrebalancer.v1.Params.max_target_validators": + value := x.MaxTargetValidators + return protoreflect.ValueOfUint32(value) + case "cosmos.poolrebalancer.v1.Params.rebalance_threshold_bp": + value := x.RebalanceThresholdBp + return protoreflect.ValueOfUint32(value) + case "cosmos.poolrebalancer.v1.Params.max_ops_per_block": + value := x.MaxOpsPerBlock + return protoreflect.ValueOfUint32(value) + case "cosmos.poolrebalancer.v1.Params.max_move_per_op": + value := x.MaxMovePerOp + return protoreflect.ValueOfString(value) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.Params")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.Params does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_Params) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.Params.pool_delegator_address": + x.PoolDelegatorAddress = value.Interface().(string) + case "cosmos.poolrebalancer.v1.Params.max_target_validators": + x.MaxTargetValidators = uint32(value.Uint()) + case "cosmos.poolrebalancer.v1.Params.rebalance_threshold_bp": + x.RebalanceThresholdBp = uint32(value.Uint()) + case "cosmos.poolrebalancer.v1.Params.max_ops_per_block": + x.MaxOpsPerBlock = uint32(value.Uint()) + case "cosmos.poolrebalancer.v1.Params.max_move_per_op": + x.MaxMovePerOp = value.Interface().(string) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.Params")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.Params does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_Params) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.Params.pool_delegator_address": + panic(fmt.Errorf("field pool_delegator_address of message cosmos.poolrebalancer.v1.Params is not mutable")) + case "cosmos.poolrebalancer.v1.Params.max_target_validators": + panic(fmt.Errorf("field max_target_validators of message cosmos.poolrebalancer.v1.Params is not mutable")) + case "cosmos.poolrebalancer.v1.Params.rebalance_threshold_bp": + panic(fmt.Errorf("field rebalance_threshold_bp of message cosmos.poolrebalancer.v1.Params is not mutable")) + case "cosmos.poolrebalancer.v1.Params.max_ops_per_block": + panic(fmt.Errorf("field max_ops_per_block of message cosmos.poolrebalancer.v1.Params is not mutable")) + case "cosmos.poolrebalancer.v1.Params.max_move_per_op": + panic(fmt.Errorf("field max_move_per_op of message cosmos.poolrebalancer.v1.Params is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.Params")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.Params does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. 
+func (x *fastReflection_Params) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.Params.pool_delegator_address": + return protoreflect.ValueOfString("") + case "cosmos.poolrebalancer.v1.Params.max_target_validators": + return protoreflect.ValueOfUint32(uint32(0)) + case "cosmos.poolrebalancer.v1.Params.rebalance_threshold_bp": + return protoreflect.ValueOfUint32(uint32(0)) + case "cosmos.poolrebalancer.v1.Params.max_ops_per_block": + return protoreflect.ValueOfUint32(uint32(0)) + case "cosmos.poolrebalancer.v1.Params.max_move_per_op": + return protoreflect.ValueOfString("") + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.Params")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.Params does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_Params) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.Params", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_Params) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. 
+// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Params) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_Params) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. +func (x *fastReflection_Params) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*Params) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + l = len(x.PoolDelegatorAddress) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.MaxTargetValidators != 0 { + n += 1 + runtime.Sov(uint64(x.MaxTargetValidators)) + } + if x.RebalanceThresholdBp != 0 { + n += 1 + runtime.Sov(uint64(x.RebalanceThresholdBp)) + } + if x.MaxOpsPerBlock != 0 { + n += 1 + runtime.Sov(uint64(x.MaxOpsPerBlock)) + } + l = len(x.MaxMovePerOp) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + 
x := input.Message.Interface().(*Params) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if len(x.MaxMovePerOp) > 0 { + i -= len(x.MaxMovePerOp) + copy(dAtA[i:], x.MaxMovePerOp) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.MaxMovePerOp))) + i-- + dAtA[i] = 0x2a + } + if x.MaxOpsPerBlock != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.MaxOpsPerBlock)) + i-- + dAtA[i] = 0x20 + } + if x.RebalanceThresholdBp != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.RebalanceThresholdBp)) + i-- + dAtA[i] = 0x18 + } + if x.MaxTargetValidators != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.MaxTargetValidators)) + i-- + dAtA[i] = 0x10 + } + if len(x.PoolDelegatorAddress) > 0 { + i -= len(x.PoolDelegatorAddress) + copy(dAtA[i:], x.PoolDelegatorAddress) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.PoolDelegatorAddress))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*Params) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field PoolDelegatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.PoolDelegatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field MaxTargetValidators", wireType) + } + x.MaxTargetValidators = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.MaxTargetValidators |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field RebalanceThresholdBp", wireType) + } + x.RebalanceThresholdBp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: 
input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.RebalanceThresholdBp |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field MaxOpsPerBlock", wireType) + } + x.MaxOpsPerBlock = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.MaxOpsPerBlock |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field MaxMovePerOp", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF 
+ } + x.MaxMovePerOp = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var ( + md_PendingRedelegation protoreflect.MessageDescriptor + fd_PendingRedelegation_delegator_address protoreflect.FieldDescriptor + fd_PendingRedelegation_src_validator_address protoreflect.FieldDescriptor + fd_PendingRedelegation_dst_validator_address protoreflect.FieldDescriptor + fd_PendingRedelegation_amount protoreflect.FieldDescriptor + fd_PendingRedelegation_completion_time protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_init() + md_PendingRedelegation = File_cosmos_poolrebalancer_v1_poolrebalancer_proto.Messages().ByName("PendingRedelegation") + fd_PendingRedelegation_delegator_address = md_PendingRedelegation.Fields().ByName("delegator_address") + 
fd_PendingRedelegation_src_validator_address = md_PendingRedelegation.Fields().ByName("src_validator_address") + fd_PendingRedelegation_dst_validator_address = md_PendingRedelegation.Fields().ByName("dst_validator_address") + fd_PendingRedelegation_amount = md_PendingRedelegation.Fields().ByName("amount") + fd_PendingRedelegation_completion_time = md_PendingRedelegation.Fields().ByName("completion_time") +} + +var _ protoreflect.Message = (*fastReflection_PendingRedelegation)(nil) + +type fastReflection_PendingRedelegation PendingRedelegation + +func (x *PendingRedelegation) ProtoReflect() protoreflect.Message { + return (*fastReflection_PendingRedelegation)(x) +} + +func (x *PendingRedelegation) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_PendingRedelegation_messageType fastReflection_PendingRedelegation_messageType +var _ protoreflect.MessageType = fastReflection_PendingRedelegation_messageType{} + +type fastReflection_PendingRedelegation_messageType struct{} + +func (x fastReflection_PendingRedelegation_messageType) Zero() protoreflect.Message { + return (*fastReflection_PendingRedelegation)(nil) +} +func (x fastReflection_PendingRedelegation_messageType) New() protoreflect.Message { + return new(fastReflection_PendingRedelegation) +} +func (x fastReflection_PendingRedelegation_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_PendingRedelegation +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. 
+func (x *fastReflection_PendingRedelegation) Descriptor() protoreflect.MessageDescriptor { + return md_PendingRedelegation +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_PendingRedelegation) Type() protoreflect.MessageType { + return _fastReflection_PendingRedelegation_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_PendingRedelegation) New() protoreflect.Message { + return new(fastReflection_PendingRedelegation) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_PendingRedelegation) Interface() protoreflect.ProtoMessage { + return (*PendingRedelegation)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. 
+func (x *fastReflection_PendingRedelegation) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.DelegatorAddress != "" { + value := protoreflect.ValueOfString(x.DelegatorAddress) + if !f(fd_PendingRedelegation_delegator_address, value) { + return + } + } + if x.SrcValidatorAddress != "" { + value := protoreflect.ValueOfString(x.SrcValidatorAddress) + if !f(fd_PendingRedelegation_src_validator_address, value) { + return + } + } + if x.DstValidatorAddress != "" { + value := protoreflect.ValueOfString(x.DstValidatorAddress) + if !f(fd_PendingRedelegation_dst_validator_address, value) { + return + } + } + if x.Amount != nil { + value := protoreflect.ValueOfMessage(x.Amount.ProtoReflect()) + if !f(fd_PendingRedelegation_amount, value) { + return + } + } + if x.CompletionTime != nil { + value := protoreflect.ValueOfMessage(x.CompletionTime.ProtoReflect()) + if !f(fd_PendingRedelegation_completion_time, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. 
+func (x *fastReflection_PendingRedelegation) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.PendingRedelegation.delegator_address": + return x.DelegatorAddress != "" + case "cosmos.poolrebalancer.v1.PendingRedelegation.src_validator_address": + return x.SrcValidatorAddress != "" + case "cosmos.poolrebalancer.v1.PendingRedelegation.dst_validator_address": + return x.DstValidatorAddress != "" + case "cosmos.poolrebalancer.v1.PendingRedelegation.amount": + return x.Amount != nil + case "cosmos.poolrebalancer.v1.PendingRedelegation.completion_time": + return x.CompletionTime != nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.PendingRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.PendingRedelegation does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_PendingRedelegation) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.PendingRedelegation.delegator_address": + x.DelegatorAddress = "" + case "cosmos.poolrebalancer.v1.PendingRedelegation.src_validator_address": + x.SrcValidatorAddress = "" + case "cosmos.poolrebalancer.v1.PendingRedelegation.dst_validator_address": + x.DstValidatorAddress = "" + case "cosmos.poolrebalancer.v1.PendingRedelegation.amount": + x.Amount = nil + case "cosmos.poolrebalancer.v1.PendingRedelegation.completion_time": + x.CompletionTime = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.PendingRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.PendingRedelegation does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_PendingRedelegation) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.poolrebalancer.v1.PendingRedelegation.delegator_address": + value := x.DelegatorAddress + return protoreflect.ValueOfString(value) + case "cosmos.poolrebalancer.v1.PendingRedelegation.src_validator_address": + value := x.SrcValidatorAddress + return protoreflect.ValueOfString(value) + case "cosmos.poolrebalancer.v1.PendingRedelegation.dst_validator_address": + value := x.DstValidatorAddress + return protoreflect.ValueOfString(value) + case "cosmos.poolrebalancer.v1.PendingRedelegation.amount": + value := x.Amount + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "cosmos.poolrebalancer.v1.PendingRedelegation.completion_time": + value := x.CompletionTime + return protoreflect.ValueOfMessage(value.ProtoReflect()) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.PendingRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.PendingRedelegation does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_PendingRedelegation) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.PendingRedelegation.delegator_address": + x.DelegatorAddress = value.Interface().(string) + case "cosmos.poolrebalancer.v1.PendingRedelegation.src_validator_address": + x.SrcValidatorAddress = value.Interface().(string) + case "cosmos.poolrebalancer.v1.PendingRedelegation.dst_validator_address": + x.DstValidatorAddress = value.Interface().(string) + case "cosmos.poolrebalancer.v1.PendingRedelegation.amount": + x.Amount = value.Message().Interface().(*v1beta1.Coin) + case "cosmos.poolrebalancer.v1.PendingRedelegation.completion_time": + x.CompletionTime = value.Message().Interface().(*timestamppb.Timestamp) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.PendingRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.PendingRedelegation does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_PendingRedelegation) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.PendingRedelegation.amount": + if x.Amount == nil { + x.Amount = new(v1beta1.Coin) + } + return protoreflect.ValueOfMessage(x.Amount.ProtoReflect()) + case "cosmos.poolrebalancer.v1.PendingRedelegation.completion_time": + if x.CompletionTime == nil { + x.CompletionTime = new(timestamppb.Timestamp) + } + return protoreflect.ValueOfMessage(x.CompletionTime.ProtoReflect()) + case "cosmos.poolrebalancer.v1.PendingRedelegation.delegator_address": + panic(fmt.Errorf("field delegator_address of message cosmos.poolrebalancer.v1.PendingRedelegation is not mutable")) + case "cosmos.poolrebalancer.v1.PendingRedelegation.src_validator_address": + panic(fmt.Errorf("field src_validator_address of message cosmos.poolrebalancer.v1.PendingRedelegation is not mutable")) + case "cosmos.poolrebalancer.v1.PendingRedelegation.dst_validator_address": + panic(fmt.Errorf("field dst_validator_address of message cosmos.poolrebalancer.v1.PendingRedelegation is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.PendingRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.PendingRedelegation does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. 
+func (x *fastReflection_PendingRedelegation) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.PendingRedelegation.delegator_address": + return protoreflect.ValueOfString("") + case "cosmos.poolrebalancer.v1.PendingRedelegation.src_validator_address": + return protoreflect.ValueOfString("") + case "cosmos.poolrebalancer.v1.PendingRedelegation.dst_validator_address": + return protoreflect.ValueOfString("") + case "cosmos.poolrebalancer.v1.PendingRedelegation.amount": + m := new(v1beta1.Coin) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "cosmos.poolrebalancer.v1.PendingRedelegation.completion_time": + m := new(timestamppb.Timestamp) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.PendingRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.PendingRedelegation does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_PendingRedelegation) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.PendingRedelegation", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_PendingRedelegation) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. 
+// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_PendingRedelegation) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_PendingRedelegation) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_PendingRedelegation) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*PendingRedelegation) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + l = len(x.DelegatorAddress) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.SrcValidatorAddress) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.DstValidatorAddress) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.Amount != nil { + l = options.Size(x.Amount) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.CompletionTime != nil { + l = options.Size(x.CompletionTime) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*PendingRedelegation) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.CompletionTime != nil { + encoded, err := options.Marshal(x.CompletionTime) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x2a + } + if x.Amount != nil { + encoded, err := options.Marshal(x.Amount) + if err != nil { + 
return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x22 + } + if len(x.DstValidatorAddress) > 0 { + i -= len(x.DstValidatorAddress) + copy(dAtA[i:], x.DstValidatorAddress) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.DstValidatorAddress))) + i-- + dAtA[i] = 0x1a + } + if len(x.SrcValidatorAddress) > 0 { + i -= len(x.SrcValidatorAddress) + copy(dAtA[i:], x.SrcValidatorAddress) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.SrcValidatorAddress))) + i-- + dAtA[i] = 0x12 + } + if len(x.DelegatorAddress) > 0 { + i -= len(x.DelegatorAddress) + copy(dAtA[i:], x.DelegatorAddress) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.DelegatorAddress))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*PendingRedelegation) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: PendingRedelegation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: PendingRedelegation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field DelegatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.DelegatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field SrcValidatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.SrcValidatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field DstValidatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, 
runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.DstValidatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.Amount == nil { + x.Amount = &v1beta1.Coin{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Amount); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, 
Flags: input.Flags}, runtime.ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
+			}
+			if postIndex > l {
+				return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
+			}
+			if x.CompletionTime == nil {
+				x.CompletionTime = &timestamppb.Timestamp{}
+			}
+			if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.CompletionTime); err != nil {
+				return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := runtime.Skip(dAtA[iNdEx:])
+			if err != nil {
+				return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
+			}
+			if !options.DiscardUnknown {
+				x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var _ protoreflect.List = (*_QueuedRedelegation_1_list)(nil) + +type _QueuedRedelegation_1_list struct { + list *[]*PendingRedelegation +} + +func (x *_QueuedRedelegation_1_list) Len() int { + if x.list == nil { + return 0 + } + return len(*x.list) +} + +func (x *_QueuedRedelegation_1_list) Get(i int) protoreflect.Value { + return protoreflect.ValueOfMessage((*x.list)[i].ProtoReflect()) +} + +func (x *_QueuedRedelegation_1_list) Set(i int, value protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*PendingRedelegation) + (*x.list)[i] = concreteValue +} + +func (x *_QueuedRedelegation_1_list) Append(value protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*PendingRedelegation) + *x.list = append(*x.list, concreteValue) +} + +func (x *_QueuedRedelegation_1_list) AppendMutable() protoreflect.Value { + v := new(PendingRedelegation) + *x.list = append(*x.list, v) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_QueuedRedelegation_1_list) Truncate(n int) { + for i := n; i < len(*x.list); i++ { + (*x.list)[i] = nil + } + *x.list = (*x.list)[:n] +} + +func (x *_QueuedRedelegation_1_list) NewElement() protoreflect.Value { + v := new(PendingRedelegation) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_QueuedRedelegation_1_list) IsValid() bool { + return x.list != nil +} + +var ( + 
md_QueuedRedelegation protoreflect.MessageDescriptor + fd_QueuedRedelegation_entries protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_init() + md_QueuedRedelegation = File_cosmos_poolrebalancer_v1_poolrebalancer_proto.Messages().ByName("QueuedRedelegation") + fd_QueuedRedelegation_entries = md_QueuedRedelegation.Fields().ByName("entries") +} + +var _ protoreflect.Message = (*fastReflection_QueuedRedelegation)(nil) + +type fastReflection_QueuedRedelegation QueuedRedelegation + +func (x *QueuedRedelegation) ProtoReflect() protoreflect.Message { + return (*fastReflection_QueuedRedelegation)(x) +} + +func (x *QueuedRedelegation) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_QueuedRedelegation_messageType fastReflection_QueuedRedelegation_messageType +var _ protoreflect.MessageType = fastReflection_QueuedRedelegation_messageType{} + +type fastReflection_QueuedRedelegation_messageType struct{} + +func (x fastReflection_QueuedRedelegation_messageType) Zero() protoreflect.Message { + return (*fastReflection_QueuedRedelegation)(nil) +} +func (x fastReflection_QueuedRedelegation_messageType) New() protoreflect.Message { + return new(fastReflection_QueuedRedelegation) +} +func (x fastReflection_QueuedRedelegation_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_QueuedRedelegation +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. 
+func (x *fastReflection_QueuedRedelegation) Descriptor() protoreflect.MessageDescriptor { + return md_QueuedRedelegation +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_QueuedRedelegation) Type() protoreflect.MessageType { + return _fastReflection_QueuedRedelegation_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_QueuedRedelegation) New() protoreflect.Message { + return new(fastReflection_QueuedRedelegation) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_QueuedRedelegation) Interface() protoreflect.ProtoMessage { + return (*QueuedRedelegation)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_QueuedRedelegation) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if len(x.Entries) != 0 { + value := protoreflect.ValueOfList(&_QueuedRedelegation_1_list{list: &x.Entries}) + if !f(fd_QueuedRedelegation_entries, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. 
+// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_QueuedRedelegation) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueuedRedelegation.entries": + return len(x.Entries) != 0 + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueuedRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueuedRedelegation does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueuedRedelegation) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueuedRedelegation.entries": + x.Entries = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueuedRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueuedRedelegation does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_QueuedRedelegation) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.poolrebalancer.v1.QueuedRedelegation.entries": + if len(x.Entries) == 0 { + return protoreflect.ValueOfList(&_QueuedRedelegation_1_list{}) + } + listValue := &_QueuedRedelegation_1_list{list: &x.Entries} + return protoreflect.ValueOfList(listValue) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueuedRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueuedRedelegation does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueuedRedelegation) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueuedRedelegation.entries": + lv := value.List() + clv := lv.(*_QueuedRedelegation_1_list) + x.Entries = *clv.list + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueuedRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueuedRedelegation does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. 
+// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueuedRedelegation) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueuedRedelegation.entries": + if x.Entries == nil { + x.Entries = []*PendingRedelegation{} + } + value := &_QueuedRedelegation_1_list{list: &x.Entries} + return protoreflect.ValueOfList(value) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueuedRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueuedRedelegation does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_QueuedRedelegation) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueuedRedelegation.entries": + list := []*PendingRedelegation{} + return protoreflect.ValueOfList(&_QueuedRedelegation_1_list{list: &list}) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueuedRedelegation")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueuedRedelegation does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. 
+func (x *fastReflection_QueuedRedelegation) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.QueuedRedelegation", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_QueuedRedelegation) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueuedRedelegation) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_QueuedRedelegation) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_QueuedRedelegation) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*QueuedRedelegation) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if len(x.Entries) > 0 { + for _, e := range x.Entries { + l = options.Size(e) + n += 1 + l + runtime.Sov(uint64(l)) + } + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*QueuedRedelegation) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if len(x.Entries) > 0 { + for iNdEx := len(x.Entries) - 1; iNdEx >= 0; iNdEx-- { + encoded, err := options.Marshal(x.Entries[iNdEx]) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0xa + } + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*QueuedRedelegation) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: QueuedRedelegation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: QueuedRedelegation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Entries = append(x.Entries, &PendingRedelegation{}) + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Entries[len(x.Entries)-1]); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var _ protoreflect.List = (*_GenesisState_2_list)(nil) + +type _GenesisState_2_list struct { + list *[]*PendingRedelegation +} + +func (x *_GenesisState_2_list) Len() int { + if x.list == nil { + return 0 + } + return len(*x.list) +} + +func (x *_GenesisState_2_list) Get(i int) protoreflect.Value { + return protoreflect.ValueOfMessage((*x.list)[i].ProtoReflect()) +} + +func (x *_GenesisState_2_list) Set(i int, value protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*PendingRedelegation) + (*x.list)[i] = concreteValue +} + +func (x *_GenesisState_2_list) Append(value protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*PendingRedelegation) + *x.list = append(*x.list, concreteValue) +} + +func (x *_GenesisState_2_list) AppendMutable() protoreflect.Value { + v := new(PendingRedelegation) + *x.list = append(*x.list, v) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_GenesisState_2_list) Truncate(n int) { + for i := n; i < len(*x.list); i++ { + (*x.list)[i] = nil + } + *x.list = (*x.list)[:n] +} + +func (x *_GenesisState_2_list) NewElement() protoreflect.Value { + v := new(PendingRedelegation) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_GenesisState_2_list) IsValid() bool { + return x.list != nil +} + +var ( + md_GenesisState protoreflect.MessageDescriptor + 
fd_GenesisState_params protoreflect.FieldDescriptor + fd_GenesisState_pending_redelegations protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_init() + md_GenesisState = File_cosmos_poolrebalancer_v1_poolrebalancer_proto.Messages().ByName("GenesisState") + fd_GenesisState_params = md_GenesisState.Fields().ByName("params") + fd_GenesisState_pending_redelegations = md_GenesisState.Fields().ByName("pending_redelegations") +} + +var _ protoreflect.Message = (*fastReflection_GenesisState)(nil) + +type fastReflection_GenesisState GenesisState + +func (x *GenesisState) ProtoReflect() protoreflect.Message { + return (*fastReflection_GenesisState)(x) +} + +func (x *GenesisState) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_GenesisState_messageType fastReflection_GenesisState_messageType +var _ protoreflect.MessageType = fastReflection_GenesisState_messageType{} + +type fastReflection_GenesisState_messageType struct{} + +func (x fastReflection_GenesisState_messageType) Zero() protoreflect.Message { + return (*fastReflection_GenesisState)(nil) +} +func (x fastReflection_GenesisState_messageType) New() protoreflect.Message { + return new(fastReflection_GenesisState) +} +func (x fastReflection_GenesisState_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_GenesisState +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_GenesisState) Descriptor() protoreflect.MessageDescriptor { + return md_GenesisState +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. 
If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_GenesisState) Type() protoreflect.MessageType { + return _fastReflection_GenesisState_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_GenesisState) New() protoreflect.Message { + return new(fastReflection_GenesisState) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_GenesisState) Interface() protoreflect.ProtoMessage { + return (*GenesisState)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_GenesisState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.Params != nil { + value := protoreflect.ValueOfMessage(x.Params.ProtoReflect()) + if !f(fd_GenesisState_params, value) { + return + } + } + if len(x.PendingRedelegations) != 0 { + value := protoreflect.ValueOfList(&_GenesisState_2_list{list: &x.PendingRedelegations}) + if !f(fd_GenesisState_pending_redelegations, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. 
+func (x *fastReflection_GenesisState) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.GenesisState.params": + return x.Params != nil + case "cosmos.poolrebalancer.v1.GenesisState.pending_redelegations": + return len(x.PendingRedelegations) != 0 + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.GenesisState")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.GenesisState does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_GenesisState) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.GenesisState.params": + x.Params = nil + case "cosmos.poolrebalancer.v1.GenesisState.pending_redelegations": + x.PendingRedelegations = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.GenesisState")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.GenesisState does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_GenesisState) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.poolrebalancer.v1.GenesisState.params": + value := x.Params + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "cosmos.poolrebalancer.v1.GenesisState.pending_redelegations": + if len(x.PendingRedelegations) == 0 { + return protoreflect.ValueOfList(&_GenesisState_2_list{}) + } + listValue := &_GenesisState_2_list{list: &x.PendingRedelegations} + return protoreflect.ValueOfList(listValue) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.GenesisState")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.GenesisState does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_GenesisState) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.GenesisState.params": + x.Params = value.Message().Interface().(*Params) + case "cosmos.poolrebalancer.v1.GenesisState.pending_redelegations": + lv := value.List() + clv := lv.(*_GenesisState_2_list) + x.PendingRedelegations = *clv.list + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.GenesisState")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.GenesisState does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_GenesisState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.GenesisState.params": + if x.Params == nil { + x.Params = new(Params) + } + return protoreflect.ValueOfMessage(x.Params.ProtoReflect()) + case "cosmos.poolrebalancer.v1.GenesisState.pending_redelegations": + if x.PendingRedelegations == nil { + x.PendingRedelegations = []*PendingRedelegation{} + } + value := &_GenesisState_2_list{list: &x.PendingRedelegations} + return protoreflect.ValueOfList(value) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.GenesisState")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.GenesisState does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_GenesisState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.GenesisState.params": + m := new(Params) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "cosmos.poolrebalancer.v1.GenesisState.pending_redelegations": + list := []*PendingRedelegation{} + return protoreflect.ValueOfList(&_GenesisState_2_list{list: &list}) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.GenesisState")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.GenesisState does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. 
+func (x *fastReflection_GenesisState) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.GenesisState", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_GenesisState) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_GenesisState) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_GenesisState) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_GenesisState) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*GenesisState) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.Params != nil { + l = options.Size(x.Params) + n += 1 + l + runtime.Sov(uint64(l)) + } + if len(x.PendingRedelegations) > 0 { + for _, e := range x.PendingRedelegations { + l = options.Size(e) + n += 1 + l + runtime.Sov(uint64(l)) + } + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*GenesisState) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if len(x.PendingRedelegations) > 0 { + for iNdEx := len(x.PendingRedelegations) - 1; iNdEx >= 0; iNdEx-- { + encoded, err := options.Marshal(x.PendingRedelegations[iNdEx]) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x12 + } + } + if x.Params != nil { + encoded, err := options.Marshal(x.Params) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], 
encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*GenesisState) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.Params == nil { + x.Params = &Params{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Params); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field PendingRedelegations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, 
runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.PendingRedelegations = append(x.PendingRedelegations, &PendingRedelegation{}) + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.PendingRedelegations[len(x.PendingRedelegations)-1]); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.0 +// protoc (unknown) +// source: cosmos/poolrebalancer/v1/poolrebalancer.proto + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Params defines the parameters for the poolrebalancer module. +type Params struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // pool_delegator_address is the account whose stake is rebalanced. + PoolDelegatorAddress string `protobuf:"bytes,1,opt,name=pool_delegator_address,json=poolDelegatorAddress,proto3" json:"pool_delegator_address,omitempty"` + // max_target_validators caps the bonded validator set size (top N by power). + MaxTargetValidators uint32 `protobuf:"varint,2,opt,name=max_target_validators,json=maxTargetValidators,proto3" json:"max_target_validators,omitempty"` + // rebalance_threshold_bp is the drift threshold in basis points. + RebalanceThresholdBp uint32 `protobuf:"varint,3,opt,name=rebalance_threshold_bp,json=rebalanceThresholdBp,proto3" json:"rebalance_threshold_bp,omitempty"` + // max_ops_per_block caps redelegation operations per block. + MaxOpsPerBlock uint32 `protobuf:"varint,4,opt,name=max_ops_per_block,json=maxOpsPerBlock,proto3" json:"max_ops_per_block,omitempty"` + // max_move_per_op caps the amount moved per operation (0 = no cap). + MaxMovePerOp string `protobuf:"bytes,5,opt,name=max_move_per_op,json=maxMovePerOp,proto3" json:"max_move_per_op,omitempty"` +} + +func (x *Params) Reset() { + *x = Params{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Params) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Params) ProtoMessage() {} + +// Deprecated: Use Params.ProtoReflect.Descriptor instead. 
+func (*Params) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescGZIP(), []int{0} +} + +func (x *Params) GetPoolDelegatorAddress() string { + if x != nil { + return x.PoolDelegatorAddress + } + return "" +} + +func (x *Params) GetMaxTargetValidators() uint32 { + if x != nil { + return x.MaxTargetValidators + } + return 0 +} + +func (x *Params) GetRebalanceThresholdBp() uint32 { + if x != nil { + return x.RebalanceThresholdBp + } + return 0 +} + +func (x *Params) GetMaxOpsPerBlock() uint32 { + if x != nil { + return x.MaxOpsPerBlock + } + return 0 +} + +func (x *Params) GetMaxMovePerOp() string { + if x != nil { + return x.MaxMovePerOp + } + return "" +} + +// PendingRedelegation is an in-flight redelegation tracked for transitive redelegation safety. +type PendingRedelegation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DelegatorAddress string `protobuf:"bytes,1,opt,name=delegator_address,json=delegatorAddress,proto3" json:"delegator_address,omitempty"` + SrcValidatorAddress string `protobuf:"bytes,2,opt,name=src_validator_address,json=srcValidatorAddress,proto3" json:"src_validator_address,omitempty"` + DstValidatorAddress string `protobuf:"bytes,3,opt,name=dst_validator_address,json=dstValidatorAddress,proto3" json:"dst_validator_address,omitempty"` + Amount *v1beta1.Coin `protobuf:"bytes,4,opt,name=amount,proto3" json:"amount,omitempty"` + CompletionTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=completion_time,json=completionTime,proto3" json:"completion_time,omitempty"` +} + +func (x *PendingRedelegation) Reset() { + *x = PendingRedelegation{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PendingRedelegation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*PendingRedelegation) ProtoMessage() {} + +// Deprecated: Use PendingRedelegation.ProtoReflect.Descriptor instead. +func (*PendingRedelegation) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescGZIP(), []int{1} +} + +func (x *PendingRedelegation) GetDelegatorAddress() string { + if x != nil { + return x.DelegatorAddress + } + return "" +} + +func (x *PendingRedelegation) GetSrcValidatorAddress() string { + if x != nil { + return x.SrcValidatorAddress + } + return "" +} + +func (x *PendingRedelegation) GetDstValidatorAddress() string { + if x != nil { + return x.DstValidatorAddress + } + return "" +} + +func (x *PendingRedelegation) GetAmount() *v1beta1.Coin { + if x != nil { + return x.Amount + } + return nil +} + +func (x *PendingRedelegation) GetCompletionTime() *timestamppb.Timestamp { + if x != nil { + return x.CompletionTime + } + return nil +} + +// QueuedRedelegation groups redelegations that share the same completion time. +type QueuedRedelegation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*PendingRedelegation `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *QueuedRedelegation) Reset() { + *x = QueuedRedelegation{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueuedRedelegation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueuedRedelegation) ProtoMessage() {} + +// Deprecated: Use QueuedRedelegation.ProtoReflect.Descriptor instead. 
+func (*QueuedRedelegation) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescGZIP(), []int{2} +} + +func (x *QueuedRedelegation) GetEntries() []*PendingRedelegation { + if x != nil { + return x.Entries + } + return nil +} + +// GenesisState defines the poolrebalancer module's genesis state. +type GenesisState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Params *Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params,omitempty"` + // pending_redelegations allow restoring in-flight state on restart. + // They are optional for initial deployments. + PendingRedelegations []*PendingRedelegation `protobuf:"bytes,2,rep,name=pending_redelegations,json=pendingRedelegations,proto3" json:"pending_redelegations,omitempty"` +} + +func (x *GenesisState) Reset() { + *x = GenesisState{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenesisState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenesisState) ProtoMessage() {} + +// Deprecated: Use GenesisState.ProtoReflect.Descriptor instead. 
+func (*GenesisState) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescGZIP(), []int{3} +} + +func (x *GenesisState) GetParams() *Params { + if x != nil { + return x.Params + } + return nil +} + +func (x *GenesisState) GetPendingRedelegations() []*PendingRedelegation { + if x != nil { + return x.PendingRedelegations + } + return nil +} + +var File_cosmos_poolrebalancer_v1_poolrebalancer_proto protoreflect.FileDescriptor + +var file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, + 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x18, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x63, + 0x6f, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x67, 0x6f, 0x67, 0x6f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x67, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xb8, 0x02, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x70, + 0x6f, 0x6f, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x6f, 0x6f, + 0x6c, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x61, 0x72, 
0x67, 0x65, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x13, 0x6d, 0x61, 0x78, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x62, 0x70, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x14, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x70, 0x12, 0x29, 0x0a, 0x11, 0x6d, + 0x61, 0x78, 0x5f, 0x6f, 0x70, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x4f, 0x70, 0x73, 0x50, 0x65, + 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x44, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x6f, + 0x76, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x1d, 0xc8, 0xde, 0x1f, 0x00, 0xda, 0xde, 0x1f, 0x15, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, + 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x6d, 0x61, 0x74, 0x68, 0x2e, 0x49, 0x6e, 0x74, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x4d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x72, 0x4f, 0x70, 0x4a, 0x04, 0x08, 0x06, + 0x10, 0x07, 0x52, 0x17, 0x75, 0x73, 0x65, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x22, 0xb2, 0x02, 0x0a, 0x13, + 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x6f, 0x72, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x32, 0x0a, 0x15, 0x73, 0x72, 0x63, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, + 0x72, 0x5f, 
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x73, 0x72, 0x63, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x64, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, + 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, + 0x6f, 0x69, 0x6e, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x4d, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x08, 0xc8, 0xde, 0x1f, 0x00, 0x90, 0xdf, 0x1f, 0x01, + 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, + 0x22, 0x63, 0x0a, 0x12, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, + 0x2e, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x07, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0xd5, 0x01, 
0x0a, 0x0c, 0x47, 0x65, 0x6e, 0x65, 0x73, 0x69, + 0x73, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, + 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x68, 0x0a, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x72, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, + 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x14, 0x70, 0x65, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, + 0x75, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0xf5, 0x01, + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, 0x6f, + 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x13, + 0x50, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, + 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x70, + 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2f, 0x76, 0x31, + 0x3b, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 
0x72, 0x76, + 0x31, 0xa2, 0x02, 0x03, 0x43, 0x50, 0x58, 0xaa, 0x02, 0x18, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, + 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, + 0x56, 0x31, 0xca, 0x02, 0x18, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5c, 0x50, 0x6f, 0x6f, 0x6c, + 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x24, + 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5c, 0x50, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x3a, 0x3a, 0x50, + 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x3a, 0x3a, 0x56, + 0x31, 0xc8, 0xe1, 0x1e, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescOnce sync.Once + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescData = file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDesc +) + +func file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescGZIP() []byte { + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescOnce.Do(func() { + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescData = protoimpl.X.CompressGZIP(file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescData) + }) + return file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDescData +} + +var file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_cosmos_poolrebalancer_v1_poolrebalancer_proto_goTypes = []interface{}{ + (*Params)(nil), // 0: cosmos.poolrebalancer.v1.Params + (*PendingRedelegation)(nil), // 1: cosmos.poolrebalancer.v1.PendingRedelegation + (*QueuedRedelegation)(nil), // 2: cosmos.poolrebalancer.v1.QueuedRedelegation + (*GenesisState)(nil), // 3: cosmos.poolrebalancer.v1.GenesisState + (*v1beta1.Coin)(nil), // 4: 
cosmos.base.v1beta1.Coin + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp +} +var file_cosmos_poolrebalancer_v1_poolrebalancer_proto_depIdxs = []int32{ + 4, // 0: cosmos.poolrebalancer.v1.PendingRedelegation.amount:type_name -> cosmos.base.v1beta1.Coin + 5, // 1: cosmos.poolrebalancer.v1.PendingRedelegation.completion_time:type_name -> google.protobuf.Timestamp + 1, // 2: cosmos.poolrebalancer.v1.QueuedRedelegation.entries:type_name -> cosmos.poolrebalancer.v1.PendingRedelegation + 0, // 3: cosmos.poolrebalancer.v1.GenesisState.params:type_name -> cosmos.poolrebalancer.v1.Params + 1, // 4: cosmos.poolrebalancer.v1.GenesisState.pending_redelegations:type_name -> cosmos.poolrebalancer.v1.PendingRedelegation + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_cosmos_poolrebalancer_v1_poolrebalancer_proto_init() } +func file_cosmos_poolrebalancer_v1_poolrebalancer_proto_init() { + if File_cosmos_poolrebalancer_v1_poolrebalancer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Params); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PendingRedelegation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueuedRedelegation); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GenesisState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cosmos_poolrebalancer_v1_poolrebalancer_proto_goTypes, + DependencyIndexes: file_cosmos_poolrebalancer_v1_poolrebalancer_proto_depIdxs, + MessageInfos: file_cosmos_poolrebalancer_v1_poolrebalancer_proto_msgTypes, + }.Build() + File_cosmos_poolrebalancer_v1_poolrebalancer_proto = out.File + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_rawDesc = nil + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_goTypes = nil + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_depIdxs = nil +} diff --git a/api/cosmos/poolrebalancer/v1/query.pulsar.go b/api/cosmos/poolrebalancer/v1/query.pulsar.go new file mode 100644 index 00000000..aa123b05 --- /dev/null +++ b/api/cosmos/poolrebalancer/v1/query.pulsar.go @@ -0,0 +1,2172 @@ +// Code generated by protoc-gen-go-pulsar. DO NOT EDIT. 
+package poolrebalancerv1 + +import ( + _ "cosmossdk.io/api/amino" + v1beta1 "cosmossdk.io/api/cosmos/base/query/v1beta1" + fmt "fmt" + runtime "github.com/cosmos/cosmos-proto/runtime" + _ "github.com/cosmos/gogoproto/gogoproto" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + reflect "reflect" + sync "sync" +) + +var ( + md_QueryParamsRequest protoreflect.MessageDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_query_proto_init() + md_QueryParamsRequest = File_cosmos_poolrebalancer_v1_query_proto.Messages().ByName("QueryParamsRequest") +} + +var _ protoreflect.Message = (*fastReflection_QueryParamsRequest)(nil) + +type fastReflection_QueryParamsRequest QueryParamsRequest + +func (x *QueryParamsRequest) ProtoReflect() protoreflect.Message { + return (*fastReflection_QueryParamsRequest)(x) +} + +func (x *QueryParamsRequest) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_query_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_QueryParamsRequest_messageType fastReflection_QueryParamsRequest_messageType +var _ protoreflect.MessageType = fastReflection_QueryParamsRequest_messageType{} + +type fastReflection_QueryParamsRequest_messageType struct{} + +func (x fastReflection_QueryParamsRequest_messageType) Zero() protoreflect.Message { + return (*fastReflection_QueryParamsRequest)(nil) +} +func (x fastReflection_QueryParamsRequest_messageType) New() protoreflect.Message { + return new(fastReflection_QueryParamsRequest) +} +func (x fastReflection_QueryParamsRequest_messageType) Descriptor() protoreflect.MessageDescriptor { + 
return md_QueryParamsRequest +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_QueryParamsRequest) Descriptor() protoreflect.MessageDescriptor { + return md_QueryParamsRequest +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_QueryParamsRequest) Type() protoreflect.MessageType { + return _fastReflection_QueryParamsRequest_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_QueryParamsRequest) New() protoreflect.Message { + return new(fastReflection_QueryParamsRequest) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_QueryParamsRequest) Interface() protoreflect.ProtoMessage { + return (*QueryParamsRequest)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_QueryParamsRequest) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. 
+// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_QueryParamsRequest) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsRequest does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueryParamsRequest) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsRequest does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_QueryParamsRequest) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsRequest does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueryParamsRequest) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsRequest does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_QueryParamsRequest) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsRequest does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_QueryParamsRequest) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsRequest does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_QueryParamsRequest) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.QueryParamsRequest", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_QueryParamsRequest) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. 
+// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueryParamsRequest) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_QueryParamsRequest) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_QueryParamsRequest) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*QueryParamsRequest) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*QueryParamsRequest) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*QueryParamsRequest) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, 
Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var ( + md_QueryParamsResponse protoreflect.MessageDescriptor + fd_QueryParamsResponse_params protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_query_proto_init() + md_QueryParamsResponse = File_cosmos_poolrebalancer_v1_query_proto.Messages().ByName("QueryParamsResponse") + fd_QueryParamsResponse_params = md_QueryParamsResponse.Fields().ByName("params") +} + +var _ protoreflect.Message = (*fastReflection_QueryParamsResponse)(nil) + +type fastReflection_QueryParamsResponse QueryParamsResponse + +func (x *QueryParamsResponse) ProtoReflect() protoreflect.Message { + return (*fastReflection_QueryParamsResponse)(x) +} + +func (x *QueryParamsResponse) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_query_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_QueryParamsResponse_messageType fastReflection_QueryParamsResponse_messageType +var _ protoreflect.MessageType = fastReflection_QueryParamsResponse_messageType{} + +type fastReflection_QueryParamsResponse_messageType struct{} + +func (x 
fastReflection_QueryParamsResponse_messageType) Zero() protoreflect.Message { + return (*fastReflection_QueryParamsResponse)(nil) +} +func (x fastReflection_QueryParamsResponse_messageType) New() protoreflect.Message { + return new(fastReflection_QueryParamsResponse) +} +func (x fastReflection_QueryParamsResponse_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_QueryParamsResponse +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_QueryParamsResponse) Descriptor() protoreflect.MessageDescriptor { + return md_QueryParamsResponse +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_QueryParamsResponse) Type() protoreflect.MessageType { + return _fastReflection_QueryParamsResponse_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_QueryParamsResponse) New() protoreflect.Message { + return new(fastReflection_QueryParamsResponse) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_QueryParamsResponse) Interface() protoreflect.ProtoMessage { + return (*QueryParamsResponse)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. 
+func (x *fastReflection_QueryParamsResponse) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.Params != nil { + value := protoreflect.ValueOfMessage(x.Params.ProtoReflect()) + if !f(fd_QueryParamsResponse_params, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_QueryParamsResponse) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryParamsResponse.params": + return x.Params != nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsResponse does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_QueryParamsResponse) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryParamsResponse.params": + x.Params = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsResponse does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_QueryParamsResponse) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.poolrebalancer.v1.QueryParamsResponse.params": + value := x.Params + return protoreflect.ValueOfMessage(value.ProtoReflect()) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsResponse does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_QueryParamsResponse) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryParamsResponse.params": + x.Params = value.Message().Interface().(*Params) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsResponse does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueryParamsResponse) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryParamsResponse.params": + if x.Params == nil { + x.Params = new(Params) + } + return protoreflect.ValueOfMessage(x.Params.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsResponse does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. 
+func (x *fastReflection_QueryParamsResponse) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryParamsResponse.params": + m := new(Params) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryParamsResponse does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_QueryParamsResponse) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.QueryParamsResponse", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_QueryParamsResponse) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueryParamsResponse) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. 
+// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_QueryParamsResponse) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. +func (x *fastReflection_QueryParamsResponse) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*QueryParamsResponse) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.Params != nil { + l = options.Size(x.Params) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*QueryParamsResponse) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.Params != nil { + encoded, err := options.Marshal(x.Params) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*QueryParamsResponse) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.Params == nil { + x.Params = &Params{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Params); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var ( + md_QueryPendingRedelegationsRequest protoreflect.MessageDescriptor + fd_QueryPendingRedelegationsRequest_pagination protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_query_proto_init() + md_QueryPendingRedelegationsRequest = File_cosmos_poolrebalancer_v1_query_proto.Messages().ByName("QueryPendingRedelegationsRequest") + fd_QueryPendingRedelegationsRequest_pagination = md_QueryPendingRedelegationsRequest.Fields().ByName("pagination") +} + +var _ protoreflect.Message = (*fastReflection_QueryPendingRedelegationsRequest)(nil) + +type fastReflection_QueryPendingRedelegationsRequest QueryPendingRedelegationsRequest + +func (x *QueryPendingRedelegationsRequest) ProtoReflect() protoreflect.Message { + return (*fastReflection_QueryPendingRedelegationsRequest)(x) +} + +func (x *QueryPendingRedelegationsRequest) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_query_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_QueryPendingRedelegationsRequest_messageType fastReflection_QueryPendingRedelegationsRequest_messageType +var _ protoreflect.MessageType = fastReflection_QueryPendingRedelegationsRequest_messageType{} + +type 
fastReflection_QueryPendingRedelegationsRequest_messageType struct{} + +func (x fastReflection_QueryPendingRedelegationsRequest_messageType) Zero() protoreflect.Message { + return (*fastReflection_QueryPendingRedelegationsRequest)(nil) +} +func (x fastReflection_QueryPendingRedelegationsRequest_messageType) New() protoreflect.Message { + return new(fastReflection_QueryPendingRedelegationsRequest) +} +func (x fastReflection_QueryPendingRedelegationsRequest_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_QueryPendingRedelegationsRequest +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_QueryPendingRedelegationsRequest) Descriptor() protoreflect.MessageDescriptor { + return md_QueryPendingRedelegationsRequest +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_QueryPendingRedelegationsRequest) Type() protoreflect.MessageType { + return _fastReflection_QueryPendingRedelegationsRequest_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_QueryPendingRedelegationsRequest) New() protoreflect.Message { + return new(fastReflection_QueryPendingRedelegationsRequest) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_QueryPendingRedelegationsRequest) Interface() protoreflect.ProtoMessage { + return (*QueryPendingRedelegationsRequest)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. 
+func (x *fastReflection_QueryPendingRedelegationsRequest) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.Pagination != nil { + value := protoreflect.ValueOfMessage(x.Pagination.ProtoReflect()) + if !f(fd_QueryPendingRedelegationsRequest_pagination, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_QueryPendingRedelegationsRequest) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest.pagination": + return x.Pagination != nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_QueryPendingRedelegationsRequest) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest.pagination": + x.Pagination = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_QueryPendingRedelegationsRequest) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest.pagination": + value := x.Pagination + return protoreflect.ValueOfMessage(value.ProtoReflect()) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. 
+// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueryPendingRedelegationsRequest) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest.pagination": + x.Pagination = value.Message().Interface().(*v1beta1.PageRequest) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueryPendingRedelegationsRequest) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest.pagination": + if x.Pagination == nil { + x.Pagination = new(v1beta1.PageRequest) + } + return protoreflect.ValueOfMessage(x.Pagination.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. 
+// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_QueryPendingRedelegationsRequest) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest.pagination": + m := new(v1beta1.PageRequest) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_QueryPendingRedelegationsRequest) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_QueryPendingRedelegationsRequest) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_QueryPendingRedelegationsRequest) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_QueryPendingRedelegationsRequest) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. +func (x *fastReflection_QueryPendingRedelegationsRequest) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*QueryPendingRedelegationsRequest) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.Pagination != nil { + l = options.Size(x.Pagination) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*QueryPendingRedelegationsRequest) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := 
make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.Pagination != nil { + encoded, err := options.Marshal(x.Pagination) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*QueryPendingRedelegationsRequest) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: QueryPendingRedelegationsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, 
fmt.Errorf("proto: QueryPendingRedelegationsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.Pagination == nil { + x.Pagination = &v1beta1.PageRequest{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Pagination); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var _ protoreflect.List = (*_QueryPendingRedelegationsResponse_1_list)(nil) + +type _QueryPendingRedelegationsResponse_1_list struct { + list *[]*PendingRedelegation +} + +func (x *_QueryPendingRedelegationsResponse_1_list) Len() int { + if x.list == nil { + return 0 + } + return len(*x.list) +} + +func (x *_QueryPendingRedelegationsResponse_1_list) Get(i int) protoreflect.Value { + return protoreflect.ValueOfMessage((*x.list)[i].ProtoReflect()) +} + +func (x *_QueryPendingRedelegationsResponse_1_list) Set(i int, value protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*PendingRedelegation) + (*x.list)[i] = concreteValue +} + +func (x *_QueryPendingRedelegationsResponse_1_list) Append(value protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*PendingRedelegation) + *x.list = append(*x.list, concreteValue) +} + +func (x *_QueryPendingRedelegationsResponse_1_list) AppendMutable() protoreflect.Value { + v := new(PendingRedelegation) + *x.list = append(*x.list, v) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_QueryPendingRedelegationsResponse_1_list) Truncate(n int) { + for i := n; i < len(*x.list); i++ { + (*x.list)[i] = nil + } + 
*x.list = (*x.list)[:n] +} + +func (x *_QueryPendingRedelegationsResponse_1_list) NewElement() protoreflect.Value { + v := new(PendingRedelegation) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_QueryPendingRedelegationsResponse_1_list) IsValid() bool { + return x.list != nil +} + +var ( + md_QueryPendingRedelegationsResponse protoreflect.MessageDescriptor + fd_QueryPendingRedelegationsResponse_redelegations protoreflect.FieldDescriptor + fd_QueryPendingRedelegationsResponse_pagination protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_query_proto_init() + md_QueryPendingRedelegationsResponse = File_cosmos_poolrebalancer_v1_query_proto.Messages().ByName("QueryPendingRedelegationsResponse") + fd_QueryPendingRedelegationsResponse_redelegations = md_QueryPendingRedelegationsResponse.Fields().ByName("redelegations") + fd_QueryPendingRedelegationsResponse_pagination = md_QueryPendingRedelegationsResponse.Fields().ByName("pagination") +} + +var _ protoreflect.Message = (*fastReflection_QueryPendingRedelegationsResponse)(nil) + +type fastReflection_QueryPendingRedelegationsResponse QueryPendingRedelegationsResponse + +func (x *QueryPendingRedelegationsResponse) ProtoReflect() protoreflect.Message { + return (*fastReflection_QueryPendingRedelegationsResponse)(x) +} + +func (x *QueryPendingRedelegationsResponse) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_query_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_QueryPendingRedelegationsResponse_messageType fastReflection_QueryPendingRedelegationsResponse_messageType +var _ protoreflect.MessageType = fastReflection_QueryPendingRedelegationsResponse_messageType{} + +type fastReflection_QueryPendingRedelegationsResponse_messageType struct{} + 
+func (x fastReflection_QueryPendingRedelegationsResponse_messageType) Zero() protoreflect.Message { + return (*fastReflection_QueryPendingRedelegationsResponse)(nil) +} +func (x fastReflection_QueryPendingRedelegationsResponse_messageType) New() protoreflect.Message { + return new(fastReflection_QueryPendingRedelegationsResponse) +} +func (x fastReflection_QueryPendingRedelegationsResponse_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_QueryPendingRedelegationsResponse +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_QueryPendingRedelegationsResponse) Descriptor() protoreflect.MessageDescriptor { + return md_QueryPendingRedelegationsResponse +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_QueryPendingRedelegationsResponse) Type() protoreflect.MessageType { + return _fastReflection_QueryPendingRedelegationsResponse_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_QueryPendingRedelegationsResponse) New() protoreflect.Message { + return new(fastReflection_QueryPendingRedelegationsResponse) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_QueryPendingRedelegationsResponse) Interface() protoreflect.ProtoMessage { + return (*QueryPendingRedelegationsResponse)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. 
+func (x *fastReflection_QueryPendingRedelegationsResponse) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if len(x.Redelegations) != 0 { + value := protoreflect.ValueOfList(&_QueryPendingRedelegationsResponse_1_list{list: &x.Redelegations}) + if !f(fd_QueryPendingRedelegationsResponse_redelegations, value) { + return + } + } + if x.Pagination != nil { + value := protoreflect.ValueOfMessage(x.Pagination.ProtoReflect()) + if !f(fd_QueryPendingRedelegationsResponse_pagination, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_QueryPendingRedelegationsResponse) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.redelegations": + return len(x.Redelegations) != 0 + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.pagination": + return x.Pagination != nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. 
+// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueryPendingRedelegationsResponse) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.redelegations": + x.Redelegations = nil + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.pagination": + x.Pagination = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_QueryPendingRedelegationsResponse) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.redelegations": + if len(x.Redelegations) == 0 { + return protoreflect.ValueOfList(&_QueryPendingRedelegationsResponse_1_list{}) + } + listValue := &_QueryPendingRedelegationsResponse_1_list{list: &x.Redelegations} + return protoreflect.ValueOfList(listValue) + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.pagination": + value := x.Pagination + return protoreflect.ValueOfMessage(value.ProtoReflect()) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_QueryPendingRedelegationsResponse) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.redelegations": + lv := value.List() + clv := lv.(*_QueryPendingRedelegationsResponse_1_list) + x.Redelegations = *clv.list + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.pagination": + x.Pagination = value.Message().Interface().(*v1beta1.PageResponse) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_QueryPendingRedelegationsResponse) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.redelegations": + if x.Redelegations == nil { + x.Redelegations = []*PendingRedelegation{} + } + value := &_QueryPendingRedelegationsResponse_1_list{list: &x.Redelegations} + return protoreflect.ValueOfList(value) + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.pagination": + if x.Pagination == nil { + x.Pagination = new(v1beta1.PageResponse) + } + return protoreflect.ValueOfMessage(x.Pagination.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. 
+func (x *fastReflection_QueryPendingRedelegationsResponse) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.redelegations": + list := []*PendingRedelegation{} + return protoreflect.ValueOfList(&_QueryPendingRedelegationsResponse_1_list{list: &list}) + case "cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.pagination": + m := new(v1beta1.PageResponse) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_QueryPendingRedelegationsResponse) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_QueryPendingRedelegationsResponse) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. 
+// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_QueryPendingRedelegationsResponse) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_QueryPendingRedelegationsResponse) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. +func (x *fastReflection_QueryPendingRedelegationsResponse) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*QueryPendingRedelegationsResponse) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if len(x.Redelegations) > 0 { + for _, e := range x.Redelegations { + l = options.Size(e) + n += 1 + l + runtime.Sov(uint64(l)) + } + } + if x.Pagination != nil { + l = options.Size(x.Pagination) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*QueryPendingRedelegationsResponse) + if x == nil { + 
return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.Pagination != nil { + encoded, err := options.Marshal(x.Pagination) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x12 + } + if len(x.Redelegations) > 0 { + for iNdEx := len(x.Redelegations) - 1; iNdEx >= 0; iNdEx-- { + encoded, err := options.Marshal(x.Redelegations[iNdEx]) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0xa + } + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*QueryPendingRedelegationsResponse) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: QueryPendingRedelegationsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: QueryPendingRedelegationsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Redelegations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + 
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Redelegations = append(x.Redelegations, &PendingRedelegation{}) + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Redelegations[len(x.Redelegations)-1]); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + 
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.Pagination == nil { + x.Pagination = &v1beta1.PageResponse{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Pagination); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.0 +// protoc (unknown) +// source: cosmos/poolrebalancer/v1/query.proto + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// QueryParamsRequest is the request type for the Query/Params RPC method. +type QueryParamsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *QueryParamsRequest) Reset() { + *x = QueryParamsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_query_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParamsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParamsRequest) ProtoMessage() {} + +// Deprecated: Use QueryParamsRequest.ProtoReflect.Descriptor instead. +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_query_proto_rawDescGZIP(), []int{0} +} + +// QueryParamsResponse is the response type for the Query/Params RPC method. +type QueryParamsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Params *Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params,omitempty"` +} + +func (x *QueryParamsResponse) Reset() { + *x = QueryParamsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_query_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParamsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParamsResponse) ProtoMessage() {} + +// Deprecated: Use QueryParamsResponse.ProtoReflect.Descriptor instead. 
+func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_query_proto_rawDescGZIP(), []int{1} +} + +func (x *QueryParamsResponse) GetParams() *Params { + if x != nil { + return x.Params + } + return nil +} + +// QueryPendingRedelegationsRequest is the request type for the Query/PendingRedelegations RPC method. +type QueryPendingRedelegationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // pagination defines an optional pagination for the request. + Pagination *v1beta1.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (x *QueryPendingRedelegationsRequest) Reset() { + *x = QueryPendingRedelegationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_query_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryPendingRedelegationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryPendingRedelegationsRequest) ProtoMessage() {} + +// Deprecated: Use QueryPendingRedelegationsRequest.ProtoReflect.Descriptor instead. +func (*QueryPendingRedelegationsRequest) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_query_proto_rawDescGZIP(), []int{2} +} + +func (x *QueryPendingRedelegationsRequest) GetPagination() *v1beta1.PageRequest { + if x != nil { + return x.Pagination + } + return nil +} + +// QueryPendingRedelegationsResponse is the response type for the Query/PendingRedelegations RPC method. 
+type QueryPendingRedelegationsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Redelegations []*PendingRedelegation `protobuf:"bytes,1,rep,name=redelegations,proto3" json:"redelegations,omitempty"` + Pagination *v1beta1.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (x *QueryPendingRedelegationsResponse) Reset() { + *x = QueryPendingRedelegationsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_query_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryPendingRedelegationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryPendingRedelegationsResponse) ProtoMessage() {} + +// Deprecated: Use QueryPendingRedelegationsResponse.ProtoReflect.Descriptor instead. +func (*QueryPendingRedelegationsResponse) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_query_proto_rawDescGZIP(), []int{3} +} + +func (x *QueryPendingRedelegationsResponse) GetRedelegations() []*PendingRedelegation { + if x != nil { + return x.Redelegations + } + return nil +} + +func (x *QueryPendingRedelegationsResponse) GetPagination() *v1beta1.PageResponse { + if x != nil { + return x.Pagination + } + return nil +} + +var File_cosmos_poolrebalancer_v1_query_proto protoreflect.FileDescriptor + +var file_cosmos_poolrebalancer_v1_query_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, + 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x1a, 0x11, 0x61, 0x6d, 0x69, 0x6e, 0x6f, 0x2f, 0x61, 0x6d, 0x69, 0x6e, 0x6f, 
0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x62, 0x61, 0x73, 0x65, + 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x70, + 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x2d, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, + 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, + 0x67, 0x6f, 0x67, 0x6f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x67, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x14, 0x0a, 0x12, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5a, 0x0a, 0x13, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x43, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x42, 0x09, 0xc8, 0xde, 0x1f, 0x00, 0xa8, 0xe7, 0xb0, 0x2a, 0x01, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x22, 0x6a, 0x0a, 0x20, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x65, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, + 0x6f, 0x73, 0x6d, 
0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0xcc, 0x01, 0x0a, 0x21, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x65, 0x6c, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xc8, 0xde, + 0x1f, 0x00, 0xa8, 0xe7, 0xb0, 0x2a, 0x01, 0x52, 0x0d, 0x72, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x73, + 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, + 0xe4, 0x02, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x8f, 0x01, 0x0a, 0x06, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, + 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 
0x6f, 0x73, 0x2e, 0x70, 0x6f, 0x6f, 0x6c, + 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x2f, 0x63, 0x6f, 0x73, 0x6d, + 0x6f, 0x73, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0xc8, 0x01, 0x0a, 0x14, + 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3a, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, + 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x64, 0x65, + 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x3b, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, + 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x37, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x12, 0x2f, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x70, + 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2f, 0x76, 0x31, + 0x2f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x65, 0x6c, 0x65, 0x67, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0xec, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x63, + 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 
0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, + 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x70, + 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2f, 0x76, 0x31, + 0x3b, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x76, + 0x31, 0xa2, 0x02, 0x03, 0x43, 0x50, 0x58, 0xaa, 0x02, 0x18, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, + 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, + 0x56, 0x31, 0xca, 0x02, 0x18, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5c, 0x50, 0x6f, 0x6f, 0x6c, + 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x24, + 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5c, 0x50, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x3a, 0x3a, 0x50, + 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x3a, 0x3a, 0x56, + 0x31, 0xc8, 0xe1, 0x1e, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cosmos_poolrebalancer_v1_query_proto_rawDescOnce sync.Once + file_cosmos_poolrebalancer_v1_query_proto_rawDescData = file_cosmos_poolrebalancer_v1_query_proto_rawDesc +) + +func file_cosmos_poolrebalancer_v1_query_proto_rawDescGZIP() []byte { + file_cosmos_poolrebalancer_v1_query_proto_rawDescOnce.Do(func() { + file_cosmos_poolrebalancer_v1_query_proto_rawDescData = protoimpl.X.CompressGZIP(file_cosmos_poolrebalancer_v1_query_proto_rawDescData) + }) + return file_cosmos_poolrebalancer_v1_query_proto_rawDescData +} + +var file_cosmos_poolrebalancer_v1_query_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_cosmos_poolrebalancer_v1_query_proto_goTypes = []interface{}{ + (*QueryParamsRequest)(nil), // 
0: cosmos.poolrebalancer.v1.QueryParamsRequest + (*QueryParamsResponse)(nil), // 1: cosmos.poolrebalancer.v1.QueryParamsResponse + (*QueryPendingRedelegationsRequest)(nil), // 2: cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest + (*QueryPendingRedelegationsResponse)(nil), // 3: cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse + (*Params)(nil), // 4: cosmos.poolrebalancer.v1.Params + (*v1beta1.PageRequest)(nil), // 5: cosmos.base.query.v1beta1.PageRequest + (*PendingRedelegation)(nil), // 6: cosmos.poolrebalancer.v1.PendingRedelegation + (*v1beta1.PageResponse)(nil), // 7: cosmos.base.query.v1beta1.PageResponse +} +var file_cosmos_poolrebalancer_v1_query_proto_depIdxs = []int32{ + 4, // 0: cosmos.poolrebalancer.v1.QueryParamsResponse.params:type_name -> cosmos.poolrebalancer.v1.Params + 5, // 1: cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest.pagination:type_name -> cosmos.base.query.v1beta1.PageRequest + 6, // 2: cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.redelegations:type_name -> cosmos.poolrebalancer.v1.PendingRedelegation + 7, // 3: cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse.pagination:type_name -> cosmos.base.query.v1beta1.PageResponse + 0, // 4: cosmos.poolrebalancer.v1.Query.Params:input_type -> cosmos.poolrebalancer.v1.QueryParamsRequest + 2, // 5: cosmos.poolrebalancer.v1.Query.PendingRedelegations:input_type -> cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest + 1, // 6: cosmos.poolrebalancer.v1.Query.Params:output_type -> cosmos.poolrebalancer.v1.QueryParamsResponse + 3, // 7: cosmos.poolrebalancer.v1.Query.PendingRedelegations:output_type -> cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse + 6, // [6:8] is the sub-list for method output_type + 4, // [4:6] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { 
file_cosmos_poolrebalancer_v1_query_proto_init() } +func file_cosmos_poolrebalancer_v1_query_proto_init() { + if File_cosmos_poolrebalancer_v1_query_proto != nil { + return + } + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_init() + if !protoimpl.UnsafeEnabled { + file_cosmos_poolrebalancer_v1_query_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryParamsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cosmos_poolrebalancer_v1_query_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryParamsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cosmos_poolrebalancer_v1_query_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryPendingRedelegationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cosmos_poolrebalancer_v1_query_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryPendingRedelegationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cosmos_poolrebalancer_v1_query_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_cosmos_poolrebalancer_v1_query_proto_goTypes, + DependencyIndexes: file_cosmos_poolrebalancer_v1_query_proto_depIdxs, + MessageInfos: file_cosmos_poolrebalancer_v1_query_proto_msgTypes, + }.Build() + File_cosmos_poolrebalancer_v1_query_proto = out.File + 
file_cosmos_poolrebalancer_v1_query_proto_rawDesc = nil + file_cosmos_poolrebalancer_v1_query_proto_goTypes = nil + file_cosmos_poolrebalancer_v1_query_proto_depIdxs = nil +} diff --git a/api/cosmos/poolrebalancer/v1/query_grpc.pb.go b/api/cosmos/poolrebalancer/v1/query_grpc.pb.go new file mode 100644 index 00000000..37b66107 --- /dev/null +++ b/api/cosmos/poolrebalancer/v1/query_grpc.pb.go @@ -0,0 +1,150 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: cosmos/poolrebalancer/v1/query.proto + +package poolrebalancerv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Query_Params_FullMethodName = "/cosmos.poolrebalancer.v1.Query/Params" + Query_PendingRedelegations_FullMethodName = "/cosmos.poolrebalancer.v1.Query/PendingRedelegations" +) + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type QueryClient interface { + // Params returns the poolrebalancer module params. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) + // PendingRedelegations returns tracked in-flight redelegations. 
+ PendingRedelegations(ctx context.Context, in *QueryPendingRedelegationsRequest, opts ...grpc.CallOption) (*QueryPendingRedelegationsResponse, error) +} + +type queryClient struct { + cc grpc.ClientConnInterface +} + +func NewQueryClient(cc grpc.ClientConnInterface) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, Query_Params_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) PendingRedelegations(ctx context.Context, in *QueryPendingRedelegationsRequest, opts ...grpc.CallOption) (*QueryPendingRedelegationsResponse, error) { + out := new(QueryPendingRedelegationsResponse) + err := c.cc.Invoke(ctx, Query_PendingRedelegations_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +// All implementations must embed UnimplementedQueryServer +// for forward compatibility +type QueryServer interface { + // Params returns the poolrebalancer module params. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) + // PendingRedelegations returns tracked in-flight redelegations. + PendingRedelegations(context.Context, *QueryPendingRedelegationsRequest) (*QueryPendingRedelegationsResponse, error) + mustEmbedUnimplementedQueryServer() +} + +// UnimplementedQueryServer must be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (UnimplementedQueryServer) Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} +func (UnimplementedQueryServer) PendingRedelegations(context.Context, *QueryPendingRedelegationsRequest) (*QueryPendingRedelegationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PendingRedelegations not implemented") +} +func (UnimplementedQueryServer) mustEmbedUnimplementedQueryServer() {} + +// UnsafeQueryServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to QueryServer will +// result in compilation errors. +type UnsafeQueryServer interface { + mustEmbedUnimplementedQueryServer() +} + +func RegisterQueryServer(s grpc.ServiceRegistrar, srv QueryServer) { + s.RegisterService(&Query_ServiceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Query_Params_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_PendingRedelegations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPendingRedelegationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).PendingRedelegations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: Query_PendingRedelegations_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).PendingRedelegations(ctx, req.(*QueryPendingRedelegationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Query_ServiceDesc is the grpc.ServiceDesc for Query service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Query_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "cosmos.poolrebalancer.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + { + MethodName: "PendingRedelegations", + Handler: _Query_PendingRedelegations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cosmos/poolrebalancer/v1/query.proto", +} diff --git a/api/cosmos/poolrebalancer/v1/tx.pulsar.go b/api/cosmos/poolrebalancer/v1/tx.pulsar.go new file mode 100644 index 00000000..b2065bdd --- /dev/null +++ b/api/cosmos/poolrebalancer/v1/tx.pulsar.go @@ -0,0 +1,1095 @@ +// Code generated by protoc-gen-go-pulsar. DO NOT EDIT. 
+package poolrebalancerv1 + +import ( + _ "cosmossdk.io/api/amino" + _ "cosmossdk.io/api/cosmos/msg/v1" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + runtime "github.com/cosmos/cosmos-proto/runtime" + _ "github.com/cosmos/gogoproto/gogoproto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + reflect "reflect" + sync "sync" +) + +var ( + md_MsgUpdateParams protoreflect.MessageDescriptor + fd_MsgUpdateParams_authority protoreflect.FieldDescriptor + fd_MsgUpdateParams_params protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_tx_proto_init() + md_MsgUpdateParams = File_cosmos_poolrebalancer_v1_tx_proto.Messages().ByName("MsgUpdateParams") + fd_MsgUpdateParams_authority = md_MsgUpdateParams.Fields().ByName("authority") + fd_MsgUpdateParams_params = md_MsgUpdateParams.Fields().ByName("params") +} + +var _ protoreflect.Message = (*fastReflection_MsgUpdateParams)(nil) + +type fastReflection_MsgUpdateParams MsgUpdateParams + +func (x *MsgUpdateParams) ProtoReflect() protoreflect.Message { + return (*fastReflection_MsgUpdateParams)(x) +} + +func (x *MsgUpdateParams) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_tx_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_MsgUpdateParams_messageType fastReflection_MsgUpdateParams_messageType +var _ protoreflect.MessageType = fastReflection_MsgUpdateParams_messageType{} + +type fastReflection_MsgUpdateParams_messageType struct{} + +func (x fastReflection_MsgUpdateParams_messageType) Zero() protoreflect.Message { + return (*fastReflection_MsgUpdateParams)(nil) +} +func (x fastReflection_MsgUpdateParams_messageType) New() 
protoreflect.Message { + return new(fastReflection_MsgUpdateParams) +} +func (x fastReflection_MsgUpdateParams_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_MsgUpdateParams +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_MsgUpdateParams) Descriptor() protoreflect.MessageDescriptor { + return md_MsgUpdateParams +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_MsgUpdateParams) Type() protoreflect.MessageType { + return _fastReflection_MsgUpdateParams_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_MsgUpdateParams) New() protoreflect.Message { + return new(fastReflection_MsgUpdateParams) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_MsgUpdateParams) Interface() protoreflect.ProtoMessage { + return (*MsgUpdateParams)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_MsgUpdateParams) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.Authority != "" { + value := protoreflect.ValueOfString(x.Authority) + if !f(fd_MsgUpdateParams_authority, value) { + return + } + } + if x.Params != nil { + value := protoreflect.ValueOfMessage(x.Params.ProtoReflect()) + if !f(fd_MsgUpdateParams_params, value) { + return + } + } +} + +// Has reports whether a field is populated. 
+// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_MsgUpdateParams) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.MsgUpdateParams.authority": + return x.Authority != "" + case "cosmos.poolrebalancer.v1.MsgUpdateParams.params": + return x.Params != nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParams")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParams does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgUpdateParams) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.MsgUpdateParams.authority": + x.Authority = "" + case "cosmos.poolrebalancer.v1.MsgUpdateParams.params": + x.Params = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParams")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParams does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. 
+// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_MsgUpdateParams) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.poolrebalancer.v1.MsgUpdateParams.authority": + value := x.Authority + return protoreflect.ValueOfString(value) + case "cosmos.poolrebalancer.v1.MsgUpdateParams.params": + value := x.Params + return protoreflect.ValueOfMessage(value.ProtoReflect()) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParams")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParams does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_MsgUpdateParams) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.MsgUpdateParams.authority": + x.Authority = value.Interface().(string) + case "cosmos.poolrebalancer.v1.MsgUpdateParams.params": + x.Params = value.Message().Interface().(*Params) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParams")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParams does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgUpdateParams) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.MsgUpdateParams.params": + if x.Params == nil { + x.Params = new(Params) + } + return protoreflect.ValueOfMessage(x.Params.ProtoReflect()) + case "cosmos.poolrebalancer.v1.MsgUpdateParams.authority": + panic(fmt.Errorf("field authority of message cosmos.poolrebalancer.v1.MsgUpdateParams is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParams")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParams does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. 
For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_MsgUpdateParams) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.poolrebalancer.v1.MsgUpdateParams.authority": + return protoreflect.ValueOfString("") + case "cosmos.poolrebalancer.v1.MsgUpdateParams.params": + m := new(Params) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParams")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParams does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_MsgUpdateParams) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.MsgUpdateParams", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_MsgUpdateParams) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_MsgUpdateParams) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_MsgUpdateParams) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. +func (x *fastReflection_MsgUpdateParams) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*MsgUpdateParams) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + l = len(x.Authority) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.Params != nil { + l = options.Size(x.Params) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*MsgUpdateParams) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := 
len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.Params != nil { + encoded, err := options.Marshal(x.Params) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x12 + } + if len(x.Authority) > 0 { + i -= len(x.Authority) + copy(dAtA[i:], x.Authority) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Authority))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*MsgUpdateParams) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.Params == nil { + x.Params = &Params{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Params); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var ( + md_MsgUpdateParamsResponse protoreflect.MessageDescriptor +) + +func init() { + file_cosmos_poolrebalancer_v1_tx_proto_init() + md_MsgUpdateParamsResponse = File_cosmos_poolrebalancer_v1_tx_proto.Messages().ByName("MsgUpdateParamsResponse") +} + +var _ protoreflect.Message = (*fastReflection_MsgUpdateParamsResponse)(nil) + +type fastReflection_MsgUpdateParamsResponse MsgUpdateParamsResponse + +func (x *MsgUpdateParamsResponse) ProtoReflect() protoreflect.Message { + return (*fastReflection_MsgUpdateParamsResponse)(x) +} + +func (x *MsgUpdateParamsResponse) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_poolrebalancer_v1_tx_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_MsgUpdateParamsResponse_messageType fastReflection_MsgUpdateParamsResponse_messageType +var _ protoreflect.MessageType = fastReflection_MsgUpdateParamsResponse_messageType{} + +type fastReflection_MsgUpdateParamsResponse_messageType struct{} + +func (x fastReflection_MsgUpdateParamsResponse_messageType) Zero() protoreflect.Message { + return (*fastReflection_MsgUpdateParamsResponse)(nil) +} +func (x fastReflection_MsgUpdateParamsResponse_messageType) New() protoreflect.Message { + return new(fastReflection_MsgUpdateParamsResponse) +} +func (x 
fastReflection_MsgUpdateParamsResponse_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_MsgUpdateParamsResponse +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_MsgUpdateParamsResponse) Descriptor() protoreflect.MessageDescriptor { + return md_MsgUpdateParamsResponse +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_MsgUpdateParamsResponse) Type() protoreflect.MessageType { + return _fastReflection_MsgUpdateParamsResponse_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_MsgUpdateParamsResponse) New() protoreflect.Message { + return new(fastReflection_MsgUpdateParamsResponse) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_MsgUpdateParamsResponse) Interface() protoreflect.ProtoMessage { + return (*MsgUpdateParamsResponse)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_MsgUpdateParamsResponse) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. 
+// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_MsgUpdateParamsResponse) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgUpdateParamsResponse) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_MsgUpdateParamsResponse) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParamsResponse does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgUpdateParamsResponse) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_MsgUpdateParamsResponse) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_MsgUpdateParamsResponse) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.poolrebalancer.v1.MsgUpdateParamsResponse")) + } + panic(fmt.Errorf("message cosmos.poolrebalancer.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_MsgUpdateParamsResponse) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.poolrebalancer.v1.MsgUpdateParamsResponse", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_MsgUpdateParamsResponse) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. 
+// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_MsgUpdateParamsResponse) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_MsgUpdateParamsResponse) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_MsgUpdateParamsResponse) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*MsgUpdateParamsResponse) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*MsgUpdateParamsResponse) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*MsgUpdateParamsResponse) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.0 +// protoc (unknown) +// source: cosmos/poolrebalancer/v1/tx.proto + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// MsgUpdateParams defines a Msg for updating the x/poolrebalancer module parameters. +type MsgUpdateParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // params defines the x/poolrebalancer parameters to update. + // NOTE: All parameters must be supplied. 
+ Params *Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params,omitempty"` +} + +func (x *MsgUpdateParams) Reset() { + *x = MsgUpdateParams{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_tx_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MsgUpdateParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MsgUpdateParams) ProtoMessage() {} + +// Deprecated: Use MsgUpdateParams.ProtoReflect.Descriptor instead. +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_tx_proto_rawDescGZIP(), []int{0} +} + +func (x *MsgUpdateParams) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *MsgUpdateParams) GetParams() *Params { + if x != nil { + return x.Params + } + return nil +} + +// MsgUpdateParamsResponse defines the response structure for executing a MsgUpdateParams message. +type MsgUpdateParamsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MsgUpdateParamsResponse) Reset() { + *x = MsgUpdateParamsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_poolrebalancer_v1_tx_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MsgUpdateParamsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MsgUpdateParamsResponse) ProtoMessage() {} + +// Deprecated: Use MsgUpdateParamsResponse.ProtoReflect.Descriptor instead. 
+func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return file_cosmos_poolrebalancer_v1_tx_proto_rawDescGZIP(), []int{1} +} + +var File_cosmos_poolrebalancer_v1_tx_proto protoreflect.FileDescriptor + +var file_cosmos_poolrebalancer_v1_tx_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x78, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, 0x6f, 0x6c, + 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x11, 0x61, + 0x6d, 0x69, 0x6e, 0x6f, 0x2f, 0x61, 0x6d, 0x69, 0x6e, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x2d, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, + 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x6d, 0x73, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x6d, + 0x73, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x67, 0x6f, 0x67, 0x6f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x6f, 0x67, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcb, 0x01, 0x0a, 0x0f, 0x4d, 0x73, + 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x36, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x18, 0xd2, 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x06, 
0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, + 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x09, 0xc8, 0xde, 0x1f, 0x00, 0xa8, 0xe7, 0xb0, + 0x2a, 0x01, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3a, 0x3b, 0x82, 0xe7, 0xb0, 0x2a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x8a, 0xe7, 0xb0, 0x2a, 0x28, 0x63, + 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x4d, 0x73, 0x67, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x32, 0x7a, 0x0a, 0x03, 0x4d, 0x73, 0x67, 0x12, 0x6c, 0x0a, 0x0c, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x73, 0x6d, + 0x6f, 0x73, 0x2e, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x31, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, + 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x05, 0x80, 0xe7, 0xb0, 0x2a, 0x01, 0x42, 0xe5, + 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x6f, + 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, + 0x07, 0x54, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x63, 0x6f, 0x73, 
0x6d, + 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x73, + 0x6d, 0x6f, 0x73, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x43, 0x50, 0x58, 0xaa, 0x02, 0x18, 0x43, + 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x18, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, + 0x5c, 0x50, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5c, + 0x56, 0x31, 0xe2, 0x02, 0x24, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5c, 0x50, 0x6f, 0x6f, 0x6c, + 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, + 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x43, 0x6f, 0x73, 0x6d, + 0x6f, 0x73, 0x3a, 0x3a, 0x50, 0x6f, 0x6f, 0x6c, 0x72, 0x65, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cosmos_poolrebalancer_v1_tx_proto_rawDescOnce sync.Once + file_cosmos_poolrebalancer_v1_tx_proto_rawDescData = file_cosmos_poolrebalancer_v1_tx_proto_rawDesc +) + +func file_cosmos_poolrebalancer_v1_tx_proto_rawDescGZIP() []byte { + file_cosmos_poolrebalancer_v1_tx_proto_rawDescOnce.Do(func() { + file_cosmos_poolrebalancer_v1_tx_proto_rawDescData = protoimpl.X.CompressGZIP(file_cosmos_poolrebalancer_v1_tx_proto_rawDescData) + }) + return file_cosmos_poolrebalancer_v1_tx_proto_rawDescData +} + +var file_cosmos_poolrebalancer_v1_tx_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_cosmos_poolrebalancer_v1_tx_proto_goTypes = []interface{}{ + (*MsgUpdateParams)(nil), // 0: cosmos.poolrebalancer.v1.MsgUpdateParams + (*MsgUpdateParamsResponse)(nil), // 1: 
cosmos.poolrebalancer.v1.MsgUpdateParamsResponse + (*Params)(nil), // 2: cosmos.poolrebalancer.v1.Params +} +var file_cosmos_poolrebalancer_v1_tx_proto_depIdxs = []int32{ + 2, // 0: cosmos.poolrebalancer.v1.MsgUpdateParams.params:type_name -> cosmos.poolrebalancer.v1.Params + 0, // 1: cosmos.poolrebalancer.v1.Msg.UpdateParams:input_type -> cosmos.poolrebalancer.v1.MsgUpdateParams + 1, // 2: cosmos.poolrebalancer.v1.Msg.UpdateParams:output_type -> cosmos.poolrebalancer.v1.MsgUpdateParamsResponse + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_cosmos_poolrebalancer_v1_tx_proto_init() } +func file_cosmos_poolrebalancer_v1_tx_proto_init() { + if File_cosmos_poolrebalancer_v1_tx_proto != nil { + return + } + file_cosmos_poolrebalancer_v1_poolrebalancer_proto_init() + if !protoimpl.UnsafeEnabled { + file_cosmos_poolrebalancer_v1_tx_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MsgUpdateParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cosmos_poolrebalancer_v1_tx_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MsgUpdateParamsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cosmos_poolrebalancer_v1_tx_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_cosmos_poolrebalancer_v1_tx_proto_goTypes, + DependencyIndexes: file_cosmos_poolrebalancer_v1_tx_proto_depIdxs, + 
MessageInfos: file_cosmos_poolrebalancer_v1_tx_proto_msgTypes, + }.Build() + File_cosmos_poolrebalancer_v1_tx_proto = out.File + file_cosmos_poolrebalancer_v1_tx_proto_rawDesc = nil + file_cosmos_poolrebalancer_v1_tx_proto_goTypes = nil + file_cosmos_poolrebalancer_v1_tx_proto_depIdxs = nil +} diff --git a/api/cosmos/poolrebalancer/v1/tx_grpc.pb.go b/api/cosmos/poolrebalancer/v1/tx_grpc.pb.go new file mode 100644 index 00000000..3ec45d6f --- /dev/null +++ b/api/cosmos/poolrebalancer/v1/tx_grpc.pb.go @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: cosmos/poolrebalancer/v1/tx.proto + +package poolrebalancerv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Msg_UpdateParams_FullMethodName = "/cosmos.poolrebalancer.v1.Msg/UpdateParams" +) + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type MsgClient interface { + // UpdateParams is a governance operation for updating the x/poolrebalancer module parameters. + // The authority is the Cosmos SDK x/gov module account. 
+ UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc.ClientConnInterface +} + +func NewMsgClient(cc grpc.ClientConnInterface) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, Msg_UpdateParams_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +// All implementations must embed UnimplementedMsgServer +// for forward compatibility +type MsgServer interface { + // UpdateParams is a governance operation for updating the x/poolrebalancer module parameters. + // The authority is the Cosmos SDK x/gov module account. + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) + mustEmbedUnimplementedMsgServer() +} + +// UnimplementedMsgServer must be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (UnimplementedMsgServer) UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} +func (UnimplementedMsgServer) mustEmbedUnimplementedMsgServer() {} + +// UnsafeMsgServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MsgServer will +// result in compilation errors. 
+type UnsafeMsgServer interface { + mustEmbedUnimplementedMsgServer() +} + +func RegisterMsgServer(s grpc.ServiceRegistrar, srv MsgServer) { + s.RegisterService(&Msg_ServiceDesc, srv) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Msg_UpdateParams_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +// Msg_ServiceDesc is the grpc.ServiceDesc for Msg service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Msg_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "cosmos.poolrebalancer.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cosmos/poolrebalancer/v1/tx.proto", +} diff --git a/contracts/.gitignore b/contracts/.gitignore index 6f2ad389..22a63a26 100644 --- a/contracts/.gitignore +++ b/contracts/.gitignore @@ -1,6 +1,9 @@ # Compiled contracts artifacts/ +# Foundry (`forge test` / `forge build` in contracts/) +out/ + # Cached files cache/ diff --git a/contracts/community_pool.go b/contracts/community_pool.go new file mode 100644 index 00000000..076b2c23 --- /dev/null +++ b/contracts/community_pool.go @@ -0,0 +1,11 @@ +package contracts + +import ( + contractutils "github.com/cosmos/evm/contracts/utils" + evmtypes "github.com/cosmos/evm/x/vm/types" +) + +// LoadCommunityPool loads the compiled CommunityPool contract artifact. 
+func LoadCommunityPool() (evmtypes.CompiledContract, error) { + return contractutils.LoadContractFromJSONFile("solidity/pool/CommunityPool.json") +} diff --git a/contracts/community_pool_test.go b/contracts/community_pool_test.go new file mode 100644 index 00000000..d711c06a --- /dev/null +++ b/contracts/community_pool_test.go @@ -0,0 +1,25 @@ +package contracts + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// Committed CommunityPool artifact must expose reconcile and view methods used by poolrebalancer. +func TestLoadCommunityPool_IncludesReconcileMethods(t *testing.T) { + t.Parallel() + c, err := LoadCommunityPool() + require.NoError(t, err) + require.NotEmpty(t, c.Bin) + _, ok := c.ABI.Methods["reconcileTotalStaked"] + require.True(t, ok, "artifact ABI should include reconcileTotalStaked") + _, ok = c.ABI.Methods["totalStaked"] + require.True(t, ok, "artifact ABI should include totalStaked getter") + _, ok = c.ABI.Methods["creditStakeableFromRebalance"] + require.False(t, ok, "artifact ABI should not include creditStakeableFromRebalance") + _, ok = c.ABI.Methods["reconcileStakedBuckets"] + require.False(t, ok, "artifact ABI should not include reconcileStakedBuckets") + _, ok = c.ABI.Methods["pendingRebalanceUnbondReserve"] + require.False(t, ok, "artifact ABI should not include pendingRebalanceUnbondReserve getter") +} diff --git a/contracts/foundry.toml b/contracts/foundry.toml new file mode 100644 index 00000000..d0bc2335 --- /dev/null +++ b/contracts/foundry.toml @@ -0,0 +1,18 @@ +# Scope to pool + tests only so older Solidity under solidity/x/ does not force multi-solc resolution. +[profile.default] +src = "solidity/pool" +test = "test/pool" +# Avoid scanning all of node_modules (Hardhat templates pin ^0.8.28 and break solc resolution). +# forge-std: reuse submodule tests/evm-tools-compatibility/foundry/lib/forge-std +# (git submodule update --init tests/evm-tools-compatibility/foundry/lib/forge-std). 
+libs = [ + "../tests/evm-tools-compatibility/foundry/lib/forge-std", + "node_modules/@openzeppelin/contracts", +] +solc_version = "0.8.20" +evm_version = "paris" + +remappings = [ + "forge-std/=../tests/evm-tools-compatibility/foundry/lib/forge-std/src/", + "@openzeppelin/contracts/=node_modules/@openzeppelin/contracts/", +] diff --git a/contracts/package-lock.json b/contracts/package-lock.json index 325711df..23d755d0 100644 --- a/contracts/package-lock.json +++ b/contracts/package-lock.json @@ -12,7 +12,7 @@ "@account-abstraction/contracts": "^0.6.0" }, "devDependencies": { - "@openzeppelin/contracts": "^5.4.0", + "@openzeppelin/contracts": "4.9.6", "hardhat": "^3.0.15" } }, @@ -802,9 +802,9 @@ } }, "node_modules/@openzeppelin/contracts": { - "version": "5.4.0", - "resolved": "https://registry.npmjs.org/@openzeppelin/contracts/-/contracts-5.4.0.tgz", - "integrity": "sha512-eCYgWnLg6WO+X52I16TZt8uEjbtdkgLC0SUX/xnAksjjrQI4Xfn4iBRoI5j55dmlOhDv1Y7BoR3cU7e3WWhC6A==", + "version": "4.9.6", + "resolved": "https://registry.npmjs.org/@openzeppelin/contracts/-/contracts-4.9.6.tgz", + "integrity": "sha512-xSmezSupL+y9VkHZJGDoCBpmnB2ogM13ccaYDWqJTfS3dbuHkgjuwDFUmaFauBCboQMGB/S5UqUl2y54X99BmA==", "dev": true, "license": "MIT" }, diff --git a/contracts/package.json b/contracts/package.json index d49eca81..9fe2d9c8 100644 --- a/contracts/package.json +++ b/contracts/package.json @@ -7,7 +7,8 @@ "hardhat": "^3.0.15" }, "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" + "test": "echo \"Error: no test specified\" && exit 1", + "test:solidity": "hardhat test solidity" }, "repository": { "type": "git", diff --git a/contracts/solidity/pool/CommunityPool.json b/contracts/solidity/pool/CommunityPool.json new file mode 100644 index 00000000..cc7c72e3 --- /dev/null +++ b/contracts/solidity/pool/CommunityPool.json @@ -0,0 +1,1106 @@ +{ + "_format": "hh3-artifact-1", + "contractName": "CommunityPool", + "sourceName": "solidity/pool/CommunityPool.sol", + "abi": [ + { + 
"inputs": [ + { + "internalType": "address", + "name": "bondToken_", + "type": "address" + }, + { + "internalType": "uint32", + "name": "maxRetrieve_", + "type": "uint32" + }, + { + "internalType": "uint32", + "name": "maxValidators_", + "type": "uint32" + }, + { + "internalType": "uint256", + "name": "minStakeAmount_", + "type": "uint256" + }, + { + "internalType": "address", + "name": "owner_", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "EmptyPool", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "stakeablePrincipal", + "type": "uint256" + } + ], + "name": "FullExitLeavesNonStakedPrincipal", + "type": "error" + }, + { + "inputs": [], + "name": "HarvestFailed", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "requested", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "available", + "type": "uint256" + } + ], + "name": "InsufficientLiquid", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidAddress", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidAmount", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "int64", + "name": "completionTime", + "type": "int64" + }, + { + "internalType": "uint64", + "name": "currentTime", + "type": "uint64" + } + ], + "name": "InvalidCompletionTime", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidConfig", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidRequest", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidUnits", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "reservedAmount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "liquidBalance", + "type": "uint256" + } + ], + "name": "LiquidReserveInvariantViolation", + "type": "error" + }, + { + "inputs": [], + "name": "RequestAlreadyClaimed", + "type": "error" + }, + { + "inputs": [ 
+ { + "internalType": "uint64", + "name": "maturityTime", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "currentTime", + "type": "uint64" + } + ], + "name": "RequestNotMatured", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "rewardReserve", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "liquidBalance", + "type": "uint256" + } + ], + "name": "RewardReserveInvariantViolation", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "accountedLiquid", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "liquidBalance", + "type": "uint256" + } + ], + "name": "StakeablePrincipalInvariantViolation", + "type": "error" + }, + { + "inputs": [], + "name": "TokenTransferFailed", + "type": "error" + }, + { + "inputs": [], + "name": "TokenTransferFromFailed", + "type": "error" + }, + { + "inputs": [], + "name": "Unauthorized", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "requested", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "undelegated", + "type": "uint256" + } + ], + "name": "UnexpectedUndelegatedAmount", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "stakeablePrincipal", + "type": "uint256" + } + ], + "name": "WithdrawRequiresAllPrincipalBonded", + "type": "error" + }, + { + "inputs": [], + "name": "ZeroMintedUnits", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "principalAssetsBefore", + "type": "uint256" + } + ], + "name": "ZeroUnitsWithPrincipalAssets", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousCaller", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newCaller", + "type": "address" + } + ], + "name": "AutomationCallerUpdated", + "type": "event" + }, + { + "anonymous": false, + 
"inputs": [ + { + "indexed": false, + "internalType": "uint32", + "name": "maxRetrieve", + "type": "uint32" + }, + { + "indexed": false, + "internalType": "uint32", + "name": "maxValidators", + "type": "uint32" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "minStakeAmount", + "type": "uint256" + } + ], + "name": "ConfigUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "user", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "mintedUnits", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "totalUnitsAfter", + "type": "uint256" + } + ], + "name": "Deposit", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "liquidBefore", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "liquidAfter", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "harvestedAmount", + "type": "uint256" + } + ], + "name": "Harvest", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferred", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "harvestedAmount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "accRewardPerUnit", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "rewardReserve", + "type": "uint256" + } + ], + "name": "RewardIndexUpdated", + "type": "event" + }, + { + 
"anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "user", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "RewardsClaimed", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "liquidBefore", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "delegatedAmount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "validatorsCount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "totalStakedAfter", + "type": "uint256" + } + ], + "name": "Stake", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "previousTotalStaked", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "newTotalStaked", + "type": "uint256" + } + ], + "name": "TotalStakedReconciled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "previousTotalStaked", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "newTotalStaked", + "type": "uint256" + } + ], + "name": "TotalStakedSynced", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "user", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "requestId", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amountOut", + "type": "uint256" + } + ], + "name": "WithdrawClaimed", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "user", + "type": "address" + }, + { + "indexed": true, + "internalType": 
"uint256", + "name": "requestId", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "units", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amountOut", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint64", + "name": "maturityTime", + "type": "uint64" + } + ], + "name": "WithdrawRequested", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "requestId", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amountOut", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "pendingWithdrawReserveAfter", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "maturedWithdrawReserveAfter", + "type": "uint256" + } + ], + "name": "WithdrawReserveMoved", + "type": "event" + }, + { + "inputs": [], + "name": "PRECISION", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "accRewardPerUnit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "automationCaller", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "bondToken", + "outputs": [ + { + "internalType": "contract IERC20", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "claimRewards", + "outputs": [ + { + "internalType": "uint256", + "name": "claimedAmount", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + 
"name": "requestId", + "type": "uint256" + } + ], + "name": "claimWithdraw", + "outputs": [ + { + "internalType": "uint256", + "name": "amountOut", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "deposit", + "outputs": [ + { + "internalType": "uint256", + "name": "mintedUnits", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "harvest", + "outputs": [ + { + "internalType": "uint256", + "name": "harvestedAmount", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "liquidBalance", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maturedWithdrawReserve", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxRetrieve", + "outputs": [ + { + "internalType": "uint32", + "name": "", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxValidators", + "outputs": [ + { + "internalType": "uint32", + "name": "", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "minStakeAmount", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "nextWithdrawRequestId", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": 
"", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "pendingWithdrawReserve", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "pricePerUnit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "principalAssets", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "principalLiquid", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "newTotalStaked", + "type": "uint256" + } + ], + "name": "reconcileTotalStaked", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "rewardDebt", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "rewardReserve", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newAutomationCaller", + "type": "address" + } + ], + "name": "setAutomationCaller", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32", + "name": "newMaxRetrieve", + "type": "uint32" + }, + { + "internalType": "uint32", + "name": "newMaxValidators", + "type": "uint32" + }, + { + "internalType": "uint256", + 
"name": "newMinStakeAmount", + "type": "uint256" + } + ], + "name": "setConfig", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "stake", + "outputs": [ + { + "internalType": "uint256", + "name": "delegatedAmount", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "stakeablePrincipalLedger", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "newTotalStaked", + "type": "uint256" + } + ], + "name": "syncTotalStaked", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "totalStaked", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalUnits", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalWithdrawCommitments", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "unitsOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "userUnits", + "type": "uint256" + } + ], + "name": "withdraw", + "outputs": [ + { + 
"internalType": "uint256", + "name": "requestId", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "withdrawRequests", + "outputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amountOut", + "type": "uint256" + }, + { + "internalType": "uint64", + "name": "maturityTime", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "reserveMoved", + "type": "bool" + }, + { + "internalType": "bool", + "name": "claimed", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + } + ], + "bytecode": "0x60a0346200015557601f62001a4238819003918201601f19168301916001600160401b03831184841017620001595780849260a09460405283398101031262000155576200004d816200016d565b906200005c6020820162000182565b906200006b6040820162000182565b916200007f6080606084015193016200016d565b60016009556001600160a01b03949093908516801580156200014a575b620001385763ffffffff9081831615620001265760805267ffffffff00000000600a549260201b1692169060018060401b0319161717600a55600b551660018060a01b031981815f5416175f5560015416176001556040516118ad90816200019582396080518181816103a9015281816104c40152818161073f0152818161139d015261170b0152f35b6040516306b7c75960e31b8152600490fd5b60405163e6c4247b60e01b8152600490fd5b50858516156200009c565b5f80fd5b634e487b7160e01b5f52604160045260245ffd5b51906001600160a01b03821682036200015557565b519063ffffffff82168203620001555756fe6080604081815260049182361015610015575f80fd5b5f92833560e01c91826308ac525614611328575081630eccc708146112ee5781630ed61edb146112ca5781631a0a253c146111ff5781632e1a7d4d14610e67578163372500ab14610e375781633a4b66f114610dd35781634641257d14610b785781635873eb9b14610b3e5781636d86acc414610b1f5781636f62018514610b005781637bfe7d5714610ae1578163817b1cd214610ac257816383810d1d14610a485781638ca8210814610a295781638da5cb5b14610a01578163992a7dfb14610996578163
a8c7914714610934578163aaf5eb6814610911578163b13acedd14610644578163b6b55f251461043e578163b7ec1a3314610421578163bae80594146103fd578163bbe9a070146103d8578163c28f439214610394578163cab64bcd14610375578163d5f884a114610356578163da1575a4146102d7578163dacd7e0c146102b9578163e66825c314610295578163f188768414610276578163f2fde38b146101e157508063f74bcf29146101c35763fa303a5314610198575f80fd5b346101bf57816003193601126101bf5760015490516001600160a01b039091168152602090f35b5080fd5b50346101bf57816003193601126101bf576020906006549051908152f35b91905034610272576020366003190112610272576001600160a01b038235818116939084900361026e578454918216928333036102625784156102555750506001600160a01b031916821783557f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e08380a380f35b5163e6c4247b60e01b8152fd5b516282b42960e81b8152fd5b8480fd5b8280fd5b5050346101bf57816003193601126101bf57602090600b549051908152f35b5050346101bf57816003193601126101bf576020906102b2611463565b9051908152f35b90503461027257826003193601126102725760209250549051908152f35b905034610272576020366003190112610272578035906102f9600f54156114af565b6001546001600160a01b031633036103495750907f3c56cbf69c07e5656b670353b54706fabc7b858dcb7f088b50331ed8b00b772291600354908060035582519182526020820152a180600f5580f35b82516282b42960e81b8152fd5b5050346101bf57816003193601126101bf576020906007549051908152f35b5050346101bf57816003193601126101bf576020906005549051908152f35b5050346101bf57816003193601126101bf57517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b5050346101bf57816003193601126101bf5760209063ffffffff600a54169051908152f35b5050346101bf57816003193601126101bf576020906102b260065460035490611411565b5050346101bf57816003193601126101bf576020906102b2611382565b919050346102725760209283600319360112610641578235610462600f54156114af565b6001600f5580156106325761047633611662565b5061048660065460035490611411565b60025480610619575080610603575080935b84156105f55783516323b872dd60e01b81523382820152306024820152604481018390528681606481877f
00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af19081156105eb5784916105be575b50156105b057610557670de0b6b3a76400009161051b84600654611411565b600655338552600c8852858520610533888254611411565b905561054187600254611411565b600255338552600c885285852054905490611432565b04338352600d86528383205561056b6117b5565b600254835191825260208201859052604082015233907f36af321ec8d3c75236829c5317affd40ddb308863a1236d2d277a4025cccee1e90606090a2600f5551908152f35b835163be24f3c560e01b8152fd5b6105de9150873d89116105e4575b6105d6818361134c565b8101906114e8565b5f6104fc565b503d6105cc565b85513d86823e3d90fd5b8351639345f64b60e01b8152fd5b846024918551916336dda71960e21b8352820152fd5b9061062761062c9284611432565b611445565b93610498565b50505163162908e360e11b8152fd5b80fd5b919050346102725760209283600319360112610641578235610668600f54156114af565b6001600f55808252600e855282822080546001600160a01b03959190861680156109015733036108f35760028101805460ff8160481c166108e35767ffffffffffffffff8042169082168082106108c7575050861c60ff1615610814575b805460ff60481b1916600160481b179055600101546008549095908087116107f8576106fc6106f3611382565b60055490611515565b8088116107dc57508661070e91611515565b600855845163a9059cbb60e01b815233838201908152602081018890529091889183919082908890829060400103927f0000000000000000000000000000000000000000000000000000000000000000165af19081156105eb5784916107bf575b50156107b1575061077e6117b5565b82518481527f5b9b0bc34c7f6a61889ce3382d8697cc823f00d6e619362ae6b156bc8ee3ad46863392a3600f5551908152f35b835163022e258160e11b8152fd5b6107d69150873d89116105e4576105d6818361134c565b5f61076f565b83604491898951926382b3a56560e01b84528301526024820152fd5b82604491888851926382b3a56560e01b84528301526024820152fd5b6001820180546007548082116108aa57600194939261085588937fe303da04bf6b13e3562ddfe787f074bcf5855167c6667376cadc0e0d5706eeee93611515565b6007556108658154600854611411565b6008558560401b60ff60401b19855416178455546007549061089f6008548c51938493846040919493926060820195825260208201520152565b0390a29091506106c6565b88
516382b3a56560e01b8152808701929092526024820152604490fd5b60449186918a5192633760603560e21b84528301526024820152fd5b86516354e19feb60e01b81528490fd5b5083516282b42960e81b8152fd5b85516341abc80160e01b81528390fd5b5050346101bf57816003193601126101bf5760209051670de0b6b3a76400008152f35b905034610272576020366003190112610272578254813591906001600160a01b031633036103495750907f0e6c1ecf62bc9f6c26287bb6a3404d50dd44446d71506263b3bcf5393cc8f74a91600354908060035582519182526020820152a180f35b905034610272576020366003190112610272578160a09360ff92358152600e602052209181600180861b0384541693600260018201549101549283918151968752602087015267ffffffffffffffff8216818701521c161515606084015260481c1615156080820152f35b5050346101bf57816003193601126101bf57905490516001600160a01b039091168152602090f35b5050346101bf57816003193601126101bf576020906009549051908152f35b91905034610272576020366003190112610272576001600160a01b0382358181169391929084900361026e57828554163303610262578315610255575050600180546001600160a01b031981168417909155167f9b6cbce723aab630d07d7af8531985c84625fa96e4c7405835f8d8ca53b5bb498380a380f35b5050346101bf57816003193601126101bf576020906003549051908152f35b5050346101bf57816003193601126101bf576020906008549051908152f35b5050346101bf57816003193601126101bf576020906006549051908152f35b5050346101bf57816003193601126101bf576020906002549051908152f35b90503461027257602036600319011261027257356001600160a01b038116908190036102725782829160209452600d845220549051908152f35b8383346101bf57816003193601126101bf57610b96600f54156114af565b6001600f558154336001600160a01b0391821614159081610dc4575b50610db65760025415610da757610bc7611382565b9163ffffffff600a5416825190632efe8a5f60e01b825230868301526024820152602081604481856108015af1908115610d9d578291610d7f575b5015610d6f57610c10611382565b83811115610d6857610c228482611515565b935b84158015610c6b575b5060209550905f8051602061185883398151915291610c4a6117b5565b8451908152602081019190915260408101859052606090a1600f5551908152f35b610c7786600554611411565b90816005556002549081610ce8575b5050907f4ca31b8f435df10a9c
b69690e4af29947006cdd5ad35a27a2075cf262c5926915f80516020611858833981519152939260209854610cde88519283928b846040919493926060820195825260208201520152565b0390a19091610c2d565b670de0b6b3a76400009081890291898304141715610d555791602098610d49610d427f4ca31b8f435df10a9cb69690e4af29947006cdd5ad35a27a2075cf262c592691945f80516020611858833981519152989796611445565b8254611411565b81559850919293610c86565b634e487b7160e01b865260118952602486fd5b8193610c24565b8151630d599dd960e11b81528490fd5b610d97915060203d81116105e4576105d6818361134c565b85610c02565b83513d84823e3d90fd5b51631107712560e01b81529050fd5b516282b42960e81b81529050fd5b90506001541633141584610bb2565b8383346101bf57816003193601126101bf57610df1600f54156114af565b6001600f558154336001600160a01b0391821614159081610e28575b50610db65760209250610e1e611522565b91600f5551908152f35b90506001541633141584610e0d565b5050346101bf57816003193601126101bf5790602091610e59600f54156114af565b6001600f55610e1e33611662565b919050346102725760209283600319360112610641578235610e8b600f54156114af565b6001600f5580156111f057610e9f33611662565b50338252600c85528282205480821180156111e6575b6111d657600654600254908382036111b1578061119b5750610ede905b61062760035485611432565b90811561118b57600a548551633991e9e560e11b8152308189019081526020810185905291891c63ffffffff16604083015260609392909167ffffffffffffffff919042831690869085908190830103818b6108005af1938415611181578890899561112e575b5085810361111257508360070b888113801590611107575b6110eb57505085610f6d91611515565b338752600c8a5287872055610f8485600254611515565b600255610f9383600354611515565b600355610fa283600754611411565b600755338652600c8952670de0b6b3a7640000610fc4888820548a5490611432565b04338752600d8a5287872055610fd86117b5565b600954975f1989146110d8576001890160095587519060a0820190828210848311176110c55750928a8a899460027f3310f1d43f4f9df9a267a6a9da8bd7ab75ac0e320a65fc3405d43a0ca97c5ea19895839b9a988e523381528d85820190888252848184019816998a8952600e8c850198828a526080860198838a52835252209160018060a01b0390511660018060a01b0319835416178255516001820155019351
1683549260ff60401b905115158d1b169160ff60481b9051151560481b169269ffffffffffffffffffff19161717179055875194855289850152868401523392a3600f5551908152f35b634e487b7160e01b895260419052602488fd5b634e487b7160e01b875260119052602486fd5b6044918b918b519263158e5da560e11b84528301526024820152fd5b508184861610610f5d565b8a604491878c5192633a54e96d60e21b84528301526024820152fd5b809550878092503d831161117a575b611147818361134c565b81010312611176578884519461115e8d8201611504565b500151938460070b8503611172575f610f45565b8880fd5b8780fd5b503d61113d565b89513d8a823e3d90fd5b845163162908e360e11b81528690fd5b86602491875191632781f19760e11b8352820152fd5b806111c05750610ede90610ed2565b8660249187519163906361f760e01b8352820152fd5b50505051630e433c2360e31b8152fd5b5060025415610eb5565b505051630e433c2360e31b8152fd5b9050346102725760603660031901126102725780359163ffffffff80841680940361026e57602435908116908181036112c657855460443594906001600160a01b031633036112b95782156112ab5750600a805467ffffffffffffffff19168617602092831b67ffffffff0000000016179055600b84905582519485528401528201527f7d10c2f08263d04ad9c37d1a4368c63e1f087bbe9d13c792e72a112cc37aef8f90606090a180f35b83516306b7c75960e31b8152fd5b83516282b42960e81b8152fd5b8580fd5b5050346101bf57816003193601126101bf576020906102b260075460085490611411565b90503461027257602036600319011261027257356001600160a01b038116908190036102725782829160209452600c845220549051908152f35b8490346101bf57816003193601126101bf5760209063ffffffff600a54831c168152f35b90601f8019910116810190811067ffffffffffffffff82111761136e57604052565b634e487b7160e01b5f52604160045260245ffd5b6040516370a0823160e01b81523060048201526020816024817f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165afa908115611406575f916113d8575090565b906020823d82116113fe575b816113f16020938361134c565b8101031261064157505190565b3d91506113e4565b6040513d5f823e3d90fd5b9190820180921161141e57565b634e487b7160e01b5f52601160045260245ffd5b8181029291811591840414171561141e57565b811561144f570490565b634e487b7160e01b5f52601260045260245ffd5b
60025480156114a25761147b60065460035490611411565b90670de0b6b3a76400009182810292818404149015171561141e5761149f91611445565b90565b50670de0b6b3a764000090565b156114b657565b60405162461bcd60e51b815260206004820152600a6024820152697265656e7472616e637960b01b6044820152606490fd5b90816020910312611500575180151581036115005790565b5f80fd5b519063ffffffff8216820361150057565b9190820391821161141e57565b60065490600b54821061165d57600a546040805162141ed760e41b81523060048201526024810185905263ffffffff60209390931c8316604482015293915f91908186606481866108005af1958615611651578380976115e7575b5050917f4cdda2b0641e11d4d0c953327ff68eb973718a3128f576eb86e3260df1ce45f6939160809382976115b484600654611515565b6006556115c384600354611411565b93846003556115d06117b5565b8351958652602086015216908301526060820152a1565b91965092508183813d831161164a575b611601818361134c565b81010312610641575091817f4cdda2b0641e11d4d0c953327ff68eb973718a3128f576eb86e3260df1ce45f69361163e6020608095519301611504565b9691938193955061157d565b503d6115f7565b505051903d90823e3d90fd5b5f9150565b9060018060a01b0391828116905f90828252602091600c8352604091670de0b6b3a76400006116978484205460045490611432565b04858352600d8552838320908082549255818111156117aa57916116bf869261170794611515565b9889916116ce83600554611515565b600555865163a9059cbb60e01b81526001600160a01b039091166004820152602481019290925290928391908290869082906044820190565b03927f0000000000000000000000000000000000000000000000000000000000000000165af191821561179f5791611782575b501561177257907ffc30cddea38e2bf4d6ea7d3f9ed3b6ad7f176419f4963bd81318067a4aee73fe9161176b6117b5565b51858152a2565b5163022e258160e11b8152600490fd5b6117999150833d85116105e4576105d6818361134c565b5f61173a565b8351903d90823e3d90fd5b509196505050505050565b6117bd611382565b60055481811161183a57600854906117d58282611411565b83811161181c5750906117ed6117f292600654611411565b611411565b908082116117fe575050565b6044925060405191630648624b60e21b835260048301526024820152fd5b604490846040519163c53ef3b160e01b835260048301526024820152fd5b60449160405191637843b5b360e01b8352
60048301526024820152fdfe4ec2d4038813a7f233af1d6d09519189db3ed5bc5b823bf72f6d3144574721dea2646970667358221220e4f6ecbd113f4b158c28d6c8b3c4ea9f9068cd642ab37c15c378be64fb4f9c2464736f6c63430008140033", + "deployedBytecode": "0x6080604081815260049182361015610015575f80fd5b5f92833560e01c91826308ac525614611328575081630eccc708146112ee5781630ed61edb146112ca5781631a0a253c146111ff5781632e1a7d4d14610e67578163372500ab14610e375781633a4b66f114610dd35781634641257d14610b785781635873eb9b14610b3e5781636d86acc414610b1f5781636f62018514610b005781637bfe7d5714610ae1578163817b1cd214610ac257816383810d1d14610a485781638ca8210814610a295781638da5cb5b14610a01578163992a7dfb14610996578163a8c7914714610934578163aaf5eb6814610911578163b13acedd14610644578163b6b55f251461043e578163b7ec1a3314610421578163bae80594146103fd578163bbe9a070146103d8578163c28f439214610394578163cab64bcd14610375578163d5f884a114610356578163da1575a4146102d7578163dacd7e0c146102b9578163e66825c314610295578163f188768414610276578163f2fde38b146101e157508063f74bcf29146101c35763fa303a5314610198575f80fd5b346101bf57816003193601126101bf5760015490516001600160a01b039091168152602090f35b5080fd5b50346101bf57816003193601126101bf576020906006549051908152f35b91905034610272576020366003190112610272576001600160a01b038235818116939084900361026e578454918216928333036102625784156102555750506001600160a01b031916821783557f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e08380a380f35b5163e6c4247b60e01b8152fd5b516282b42960e81b8152fd5b8480fd5b8280fd5b5050346101bf57816003193601126101bf57602090600b549051908152f35b5050346101bf57816003193601126101bf576020906102b2611463565b9051908152f35b90503461027257826003193601126102725760209250549051908152f35b905034610272576020366003190112610272578035906102f9600f54156114af565b6001546001600160a01b031633036103495750907f3c56cbf69c07e5656b670353b54706fabc7b858dcb7f088b50331ed8b00b772291600354908060035582519182526020820152a180600f5580f35b82516282b42960e81b8152fd5b5050346101bf57816003193601126101bf576020906007549051908152f35b505034
6101bf57816003193601126101bf576020906005549051908152f35b5050346101bf57816003193601126101bf57517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b5050346101bf57816003193601126101bf5760209063ffffffff600a54169051908152f35b5050346101bf57816003193601126101bf576020906102b260065460035490611411565b5050346101bf57816003193601126101bf576020906102b2611382565b919050346102725760209283600319360112610641578235610462600f54156114af565b6001600f5580156106325761047633611662565b5061048660065460035490611411565b60025480610619575080610603575080935b84156105f55783516323b872dd60e01b81523382820152306024820152604481018390528681606481877f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af19081156105eb5784916105be575b50156105b057610557670de0b6b3a76400009161051b84600654611411565b600655338552600c8852858520610533888254611411565b905561054187600254611411565b600255338552600c885285852054905490611432565b04338352600d86528383205561056b6117b5565b600254835191825260208201859052604082015233907f36af321ec8d3c75236829c5317affd40ddb308863a1236d2d277a4025cccee1e90606090a2600f5551908152f35b835163be24f3c560e01b8152fd5b6105de9150873d89116105e4575b6105d6818361134c565b8101906114e8565b5f6104fc565b503d6105cc565b85513d86823e3d90fd5b8351639345f64b60e01b8152fd5b846024918551916336dda71960e21b8352820152fd5b9061062761062c9284611432565b611445565b93610498565b50505163162908e360e11b8152fd5b80fd5b919050346102725760209283600319360112610641578235610668600f54156114af565b6001600f55808252600e855282822080546001600160a01b03959190861680156109015733036108f35760028101805460ff8160481c166108e35767ffffffffffffffff8042169082168082106108c7575050861c60ff1615610814575b805460ff60481b1916600160481b179055600101546008549095908087116107f8576106fc6106f3611382565b60055490611515565b8088116107dc57508661070e91611515565b600855845163a9059cbb60e01b815233838201908152602081018890529091889183919082908890829060400103927f0000000000000000000000000000000000000000000000000000000000000000165a
f19081156105eb5784916107bf575b50156107b1575061077e6117b5565b82518481527f5b9b0bc34c7f6a61889ce3382d8697cc823f00d6e619362ae6b156bc8ee3ad46863392a3600f5551908152f35b835163022e258160e11b8152fd5b6107d69150873d89116105e4576105d6818361134c565b5f61076f565b83604491898951926382b3a56560e01b84528301526024820152fd5b82604491888851926382b3a56560e01b84528301526024820152fd5b6001820180546007548082116108aa57600194939261085588937fe303da04bf6b13e3562ddfe787f074bcf5855167c6667376cadc0e0d5706eeee93611515565b6007556108658154600854611411565b6008558560401b60ff60401b19855416178455546007549061089f6008548c51938493846040919493926060820195825260208201520152565b0390a29091506106c6565b88516382b3a56560e01b8152808701929092526024820152604490fd5b60449186918a5192633760603560e21b84528301526024820152fd5b86516354e19feb60e01b81528490fd5b5083516282b42960e81b8152fd5b85516341abc80160e01b81528390fd5b5050346101bf57816003193601126101bf5760209051670de0b6b3a76400008152f35b905034610272576020366003190112610272578254813591906001600160a01b031633036103495750907f0e6c1ecf62bc9f6c26287bb6a3404d50dd44446d71506263b3bcf5393cc8f74a91600354908060035582519182526020820152a180f35b905034610272576020366003190112610272578160a09360ff92358152600e602052209181600180861b0384541693600260018201549101549283918151968752602087015267ffffffffffffffff8216818701521c161515606084015260481c1615156080820152f35b5050346101bf57816003193601126101bf57905490516001600160a01b039091168152602090f35b5050346101bf57816003193601126101bf576020906009549051908152f35b91905034610272576020366003190112610272576001600160a01b0382358181169391929084900361026e57828554163303610262578315610255575050600180546001600160a01b031981168417909155167f9b6cbce723aab630d07d7af8531985c84625fa96e4c7405835f8d8ca53b5bb498380a380f35b5050346101bf57816003193601126101bf576020906003549051908152f35b5050346101bf57816003193601126101bf576020906008549051908152f35b5050346101bf57816003193601126101bf576020906006549051908152f35b5050346101bf57816003193601126101bf576020906002549051908152f35b90503461027257602036
600319011261027257356001600160a01b038116908190036102725782829160209452600d845220549051908152f35b8383346101bf57816003193601126101bf57610b96600f54156114af565b6001600f558154336001600160a01b0391821614159081610dc4575b50610db65760025415610da757610bc7611382565b9163ffffffff600a5416825190632efe8a5f60e01b825230868301526024820152602081604481856108015af1908115610d9d578291610d7f575b5015610d6f57610c10611382565b83811115610d6857610c228482611515565b935b84158015610c6b575b5060209550905f8051602061185883398151915291610c4a6117b5565b8451908152602081019190915260408101859052606090a1600f5551908152f35b610c7786600554611411565b90816005556002549081610ce8575b5050907f4ca31b8f435df10a9cb69690e4af29947006cdd5ad35a27a2075cf262c5926915f80516020611858833981519152939260209854610cde88519283928b846040919493926060820195825260208201520152565b0390a19091610c2d565b670de0b6b3a76400009081890291898304141715610d555791602098610d49610d427f4ca31b8f435df10a9cb69690e4af29947006cdd5ad35a27a2075cf262c592691945f80516020611858833981519152989796611445565b8254611411565b81559850919293610c86565b634e487b7160e01b865260118952602486fd5b8193610c24565b8151630d599dd960e11b81528490fd5b610d97915060203d81116105e4576105d6818361134c565b85610c02565b83513d84823e3d90fd5b51631107712560e01b81529050fd5b516282b42960e81b81529050fd5b90506001541633141584610bb2565b8383346101bf57816003193601126101bf57610df1600f54156114af565b6001600f558154336001600160a01b0391821614159081610e28575b50610db65760209250610e1e611522565b91600f5551908152f35b90506001541633141584610e0d565b5050346101bf57816003193601126101bf5790602091610e59600f54156114af565b6001600f55610e1e33611662565b919050346102725760209283600319360112610641578235610e8b600f54156114af565b6001600f5580156111f057610e9f33611662565b50338252600c85528282205480821180156111e6575b6111d657600654600254908382036111b1578061119b5750610ede905b61062760035485611432565b90811561118b57600a548551633991e9e560e11b8152308189019081526020810185905291891c63ffffffff16604083015260609392909167ffffffffffffffff919042831690869085908190830103818b
6108005af1938415611181578890899561112e575b5085810361111257508360070b888113801590611107575b6110eb57505085610f6d91611515565b338752600c8a5287872055610f8485600254611515565b600255610f9383600354611515565b600355610fa283600754611411565b600755338652600c8952670de0b6b3a7640000610fc4888820548a5490611432565b04338752600d8a5287872055610fd86117b5565b600954975f1989146110d8576001890160095587519060a0820190828210848311176110c55750928a8a899460027f3310f1d43f4f9df9a267a6a9da8bd7ab75ac0e320a65fc3405d43a0ca97c5ea19895839b9a988e523381528d85820190888252848184019816998a8952600e8c850198828a526080860198838a52835252209160018060a01b0390511660018060a01b03198354161782555160018201550193511683549260ff60401b905115158d1b169160ff60481b9051151560481b169269ffffffffffffffffffff19161717179055875194855289850152868401523392a3600f5551908152f35b634e487b7160e01b895260419052602488fd5b634e487b7160e01b875260119052602486fd5b6044918b918b519263158e5da560e11b84528301526024820152fd5b508184861610610f5d565b8a604491878c5192633a54e96d60e21b84528301526024820152fd5b809550878092503d831161117a575b611147818361134c565b81010312611176578884519461115e8d8201611504565b500151938460070b8503611172575f610f45565b8880fd5b8780fd5b503d61113d565b89513d8a823e3d90fd5b845163162908e360e11b81528690fd5b86602491875191632781f19760e11b8352820152fd5b806111c05750610ede90610ed2565b8660249187519163906361f760e01b8352820152fd5b50505051630e433c2360e31b8152fd5b5060025415610eb5565b505051630e433c2360e31b8152fd5b9050346102725760603660031901126102725780359163ffffffff80841680940361026e57602435908116908181036112c657855460443594906001600160a01b031633036112b95782156112ab5750600a805467ffffffffffffffff19168617602092831b67ffffffff0000000016179055600b84905582519485528401528201527f7d10c2f08263d04ad9c37d1a4368c63e1f087bbe9d13c792e72a112cc37aef8f90606090a180f35b83516306b7c75960e31b8152fd5b83516282b42960e81b8152fd5b8580fd5b5050346101bf57816003193601126101bf576020906102b260075460085490611411565b90503461027257602036600319011261027257356001600160a01b038116908190036102725782829160
209452600c845220549051908152f35b8490346101bf57816003193601126101bf5760209063ffffffff600a54831c168152f35b90601f8019910116810190811067ffffffffffffffff82111761136e57604052565b634e487b7160e01b5f52604160045260245ffd5b6040516370a0823160e01b81523060048201526020816024817f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165afa908115611406575f916113d8575090565b906020823d82116113fe575b816113f16020938361134c565b8101031261064157505190565b3d91506113e4565b6040513d5f823e3d90fd5b9190820180921161141e57565b634e487b7160e01b5f52601160045260245ffd5b8181029291811591840414171561141e57565b811561144f570490565b634e487b7160e01b5f52601260045260245ffd5b60025480156114a25761147b60065460035490611411565b90670de0b6b3a76400009182810292818404149015171561141e5761149f91611445565b90565b50670de0b6b3a764000090565b156114b657565b60405162461bcd60e51b815260206004820152600a6024820152697265656e7472616e637960b01b6044820152606490fd5b90816020910312611500575180151581036115005790565b5f80fd5b519063ffffffff8216820361150057565b9190820391821161141e57565b60065490600b54821061165d57600a546040805162141ed760e41b81523060048201526024810185905263ffffffff60209390931c8316604482015293915f91908186606481866108005af1958615611651578380976115e7575b5050917f4cdda2b0641e11d4d0c953327ff68eb973718a3128f576eb86e3260df1ce45f6939160809382976115b484600654611515565b6006556115c384600354611411565b93846003556115d06117b5565b8351958652602086015216908301526060820152a1565b91965092508183813d831161164a575b611601818361134c565b81010312610641575091817f4cdda2b0641e11d4d0c953327ff68eb973718a3128f576eb86e3260df1ce45f69361163e6020608095519301611504565b9691938193955061157d565b503d6115f7565b505051903d90823e3d90fd5b5f9150565b9060018060a01b0391828116905f90828252602091600c8352604091670de0b6b3a76400006116978484205460045490611432565b04858352600d8552838320908082549255818111156117aa57916116bf869261170794611515565b9889916116ce83600554611515565b600555865163a9059cbb60e01b81526001600160a01b03909116600482015260248101929092529092839190829086908290
6044820190565b03927f0000000000000000000000000000000000000000000000000000000000000000165af191821561179f5791611782575b501561177257907ffc30cddea38e2bf4d6ea7d3f9ed3b6ad7f176419f4963bd81318067a4aee73fe9161176b6117b5565b51858152a2565b5163022e258160e11b8152600490fd5b6117999150833d85116105e4576105d6818361134c565b5f61173a565b8351903d90823e3d90fd5b509196505050505050565b6117bd611382565b60055481811161183a57600854906117d58282611411565b83811161181c5750906117ed6117f292600654611411565b611411565b908082116117fe575050565b6044925060405191630648624b60e21b835260048301526024820152fd5b604490846040519163c53ef3b160e01b835260048301526024820152fd5b60449160405191637843b5b360e01b835260048301526024820152fdfe4ec2d4038813a7f233af1d6d09519189db3ed5bc5b823bf72f6d3144574721dea2646970667358221220e4f6ecbd113f4b158c28d6c8b3c4ea9f9068cd642ab37c15c378be64fb4f9c2464736f6c63430008140033", + "linkReferences": {}, + "deployedLinkReferences": {}, + "immutableReferences": { + "9": [ + { + "length": 32, + "start": 937 + }, + { + "length": 32, + "start": 1220 + }, + { + "length": 32, + "start": 1855 + }, + { + "length": 32, + "start": 5021 + }, + { + "length": 32, + "start": 5899 + } + ] + }, + "inputSourceName": "project/solidity/pool/CommunityPool.sol", + "buildInfoId": "solc-0_8_20-72ee5748cdf90fff16b9927d5bd48bd3979ef8d1" +} \ No newline at end of file diff --git a/contracts/solidity/pool/CommunityPool.sol b/contracts/solidity/pool/CommunityPool.sol new file mode 100644 index 00000000..f576f353 --- /dev/null +++ b/contracts/solidity/pool/CommunityPool.sol @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: LGPL-3.0-only +pragma solidity >=0.8.17; + +import "../precompiles/erc20/IERC20.sol"; +import "../precompiles/staking/StakingI.sol" as staking; +import "../precompiles/distribution/DistributionI.sol" as distribution; + +/// @title CommunityPool +/// @notice Pooled staking contract with internal ownership units. +/// @dev +/// - Units (`unitsOf`) represent proportional ownership of bondToken principal. 
+/// - Principal buckets: stakeablePrincipalLedger (liquid stakeable), totalStaked (bonded),
+///   pendingWithdrawReserve/maturedWithdrawReserve (async user exits), rewardReserve (liquid rewards).
+/// - Bookkeeping can lag staking until reconcileTotalStaked (automationCaller) or syncTotalStaked (owner).
+/// - principalAssets (= stakeable + bonded) drives deposit minting and pricePerUnit.
+/// - withdraw sizes on totalStaked.
+/// - User withdraw: undelegate then claim after maturity.
+contract CommunityPool {
+    /// @dev Native token contract used for deposits/withdrawals.
+    IERC20 public immutable bondToken;
+    /// @dev Fixed-point precision used for reward index math.
+    uint256 public constant PRECISION = 1e18;
+
+    /// @dev Privileged admin; configures the pool and may break-glass sync totalStaked.
+    address public owner;
+    /// @dev Optional automation caller allowed to trigger periodic stake/harvest.
+    address public automationCaller;
+    /// @dev Total ownership units minted by the pool.
+    uint256 public totalUnits;
+    /// @dev Bonded delegated principal only. Not auto-reconciled with staking.
+    uint256 public totalStaked;
+    /// @dev Accumulated rewards per ownership unit (scaled by PRECISION).
+    uint256 public accRewardPerUnit;
+    /// @dev Total liquid rewards reserved for reward claims.
+    uint256 public rewardReserve;
+    /// @dev Principal liquid explicitly tracked as stake-eligible balance.
+    uint256 public stakeablePrincipalLedger;
+    /// @dev Principal requested for withdraw and not yet moved into matured-withdraw reserve.
+    uint256 public pendingWithdrawReserve;
+    /// @dev Liquid principal reserved for matured-but-unclaimed withdraw requests.
+    uint256 public maturedWithdrawReserve;
+    /// @dev Monotonic identifier for withdraw requests.
+    uint256 public nextWithdrawRequestId = 1;
+    /// @dev Max validator rewards retrieved per harvest() call (passed to distribution precompile).
+    uint32 public maxRetrieve;
+    /// @dev Max bonded validators targeted per stake()/withdraw() precompile call.
+    uint32 public maxValidators;
+    /// @dev Minimum stakeable liquid principal required before stake() delegates.
+    uint256 public minStakeAmount;
+
+    /// @dev Units held per user. User ownership fraction = unitsOf[user] / totalUnits.
+    mapping(address => uint256) public unitsOf;
+    /// @dev User reward checkpoint for index accounting.
+    mapping(address => uint256) public rewardDebt;
+    /// @dev Async principal withdraw requests keyed by request id.
+    mapping(uint256 => WithdrawRequest) public withdrawRequests;
+
+    /// @dev Minimal reentrancy guard state (0=not entered, 1=entered).
+    uint256 private _entered;
+
+    /// @dev One async withdraw request; created by withdraw(), settled by claimWithdraw().
+    struct WithdrawRequest {
+        address owner;       // request beneficiary; only this address may claim
+        uint256 amountOut;   // bonded principal amount to pay out at maturity
+        uint64 maturityTime; // undelegation completion time reported by staking
+        bool reserveMoved;   // true once amount moved pending -> matured reserve
+        bool claimed;        // true once paid out
+    }
+
+    error Unauthorized();
+    error InvalidAddress();
+    error InvalidAmount();
+    error InvalidUnits();
+    error InvalidConfig();
+    error EmptyPool();
+    error InsufficientLiquid(uint256 requested, uint256 available);
+    error TokenTransferFailed();
+    error TokenTransferFromFailed();
+    error HarvestFailed();
+    error ZeroMintedUnits();
+    error ZeroUnitsWithPrincipalAssets(uint256 principalAssetsBefore);
+    error RequestAlreadyClaimed();
+    error RequestNotMatured(uint64 maturityTime, uint64 currentTime);
+    error InvalidRequest();
+    error InvalidCompletionTime(int64 completionTime, uint64 currentTime);
+    error UnexpectedUndelegatedAmount(uint256 requested, uint256 undelegated);
+    error WithdrawRequiresAllPrincipalBonded(uint256 stakeablePrincipal);
+    error FullExitLeavesNonStakedPrincipal(uint256 stakeablePrincipal);
+    error RewardReserveInvariantViolation(uint256 rewardReserve, uint256 liquidBalance);
+    error LiquidReserveInvariantViolation(uint256 reservedAmount, uint256 liquidBalance);
+    error StakeablePrincipalInvariantViolation(uint256 accountedLiquid, uint256 liquidBalance);
+
+    event OwnershipTransferred(address indexed previousOwner, address indexed newOwner);
+    event AutomationCallerUpdated(address indexed previousCaller, address indexed newCaller);
+    event ConfigUpdated(uint32 maxRetrieve, uint32 maxValidators, uint256 minStakeAmount);
+    event Deposit(address indexed user, uint256 amount, uint256 mintedUnits, uint256 totalUnitsAfter);
+    event Stake(uint256 liquidBefore, uint256 delegatedAmount, uint256 validatorsCount, uint256 totalStakedAfter);
+    event Harvest(uint256 liquidBefore, uint256 liquidAfter, uint256 harvestedAmount);
+    event RewardIndexUpdated(uint256 harvestedAmount, uint256 accRewardPerUnit, uint256 rewardReserve);
+    event RewardsClaimed(address indexed user, uint256 amount);
+    event WithdrawRequested(
+        address indexed user,
+        uint256 indexed requestId,
+        uint256 units,
+        uint256 amountOut,
+        uint64 maturityTime
+    );
+    event WithdrawClaimed(address indexed user, uint256 indexed requestId, uint256 amountOut);
+    event WithdrawReserveMoved(
+        uint256 indexed requestId,
+        uint256 amountOut,
+        uint256 pendingWithdrawReserveAfter,
+        uint256 maturedWithdrawReserveAfter
+    );
+    event TotalStakedSynced(uint256 previousTotalStaked, uint256 newTotalStaked);
+    event TotalStakedReconciled(uint256 previousTotalStaked, uint256 newTotalStaked);
+
+    /// @dev Restricts the caller to `owner`.
+    modifier onlyOwner() {
+        if (msg.sender != owner) {
+            revert Unauthorized();
+        }
+        _;
+    }
+
+    /// @dev Restricts the caller to `owner` or `automationCaller`.
+    modifier onlyAutomationOrOwner() {
+        if (msg.sender != owner && msg.sender != automationCaller) {
+            revert Unauthorized();
+        }
+        _;
+    }
+
+    /// @dev Restricts the caller to `automationCaller` only (owner excluded).
+    modifier onlyAutomationCaller() {
+        if (msg.sender != automationCaller) {
+            revert Unauthorized();
+        }
+        _;
+    }
+
+    /// @dev Simple mutex; nested entry of guarded functions reverts.
+    modifier nonReentrant() {
+        require(_entered == 0, "reentrancy");
+        _entered = 1;
+        _;
+        _entered = 0;
+    }
+
+    /// @param bondToken_ ERC20 bond token used for all pool accounting; must be nonzero.
+    /// @param maxRetrieve_ Initial per-harvest validator-reward retrieval cap.
+    /// @param maxValidators_ Initial validator cap; must be nonzero.
+    /// @param minStakeAmount_ Initial minimum liquid threshold for stake().
+    /// @param owner_ Initial owner; also installed as the initial automation caller.
+    constructor(
+        address bondToken_,
+        uint32 maxRetrieve_,
+        uint32 maxValidators_,
+        uint256 minStakeAmount_,
+        address owner_
+    ) {
+        if (bondToken_ == address(0) || owner_ == address(0)) {
+            revert InvalidAddress();
+        }
+        if (maxValidators_ == 0) {
+            revert InvalidConfig();
+        }
+
+        bondToken = IERC20(bondToken_);
+        maxRetrieve = maxRetrieve_;
+        maxValidators = maxValidators_;
+        minStakeAmount = minStakeAmount_;
+        owner = owner_;
+        automationCaller = owner_;
+    }
+
+    /// @notice Transfers owner privileges to a new address.
+    function transferOwnership(address newOwner) external onlyOwner {
+        if (newOwner == address(0)) revert InvalidAddress();
+
+        address prior = owner;
+        owner = newOwner;
+        emit OwnershipTransferred(prior, newOwner);
+    }
+
+    /// @notice Designates the address allowed to run automation entry points
+    ///         (`stake`/`harvest`/`reconcileTotalStaked`); owner keeps stake/harvest rights.
+    function setAutomationCaller(address newAutomationCaller) external onlyOwner {
+        if (newAutomationCaller == address(0)) revert InvalidAddress();
+
+        address prior = automationCaller;
+        automationCaller = newAutomationCaller;
+        emit AutomationCallerUpdated(prior, newAutomationCaller);
+    }
+
+    /// @notice Replaces the operational knobs consumed by stake/harvest.
+    /// @param newMaxRetrieve Cap on validator rewards claimed per harvest call.
+    /// @param newMaxValidators Cap on bonded validators per stake call; must be nonzero.
+    /// @param newMinStakeAmount Liquid threshold below which `stake` is a no-op.
+    function setConfig(
+        uint32 newMaxRetrieve,
+        uint32 newMaxValidators,
+        uint256 newMinStakeAmount
+    ) external onlyOwner {
+        if (newMaxValidators == 0) revert InvalidConfig();
+
+        maxRetrieve = newMaxRetrieve;
+        maxValidators = newMaxValidators;
+        minStakeAmount = newMinStakeAmount;
+        emit ConfigUpdated(newMaxRetrieve, newMaxValidators, newMinStakeAmount);
+    }
+
+    /// @notice Owner break-glass override of the bonded `totalStaked` figure.
+    function syncTotalStaked(uint256 newTotalStaked) external onlyOwner {
+        uint256 prior = totalStaked;
+        totalStaked = newTotalStaked;
+        emit TotalStakedSynced(prior, newTotalStaked);
+    }
+
+    /// @notice Raw bond-token balance currently held by this contract.
+    function liquidBalance() public view returns (uint256) {
+        return bondToken.balanceOf(address(this));
+    }
+
+    /// @notice Current liquid principal available for stake/deposit pricing.
+    /// @dev Ledger-driven value; independent from raw balance deltas.
+    function principalLiquid() public view returns (uint256) {
+        return stakeablePrincipalLedger;
+    }
+
+    /// @notice Principal base backing ownership units: stakeable liquid plus bonded stake.
+    function principalAssets() public view returns (uint256) {
+        return principalLiquid() + totalStaked;
+    }
+
+    /// @notice Sum of principal locked in pending plus matured async withdraw requests.
+    function totalWithdrawCommitments() external view returns (uint256) {
+        return pendingWithdrawReserve + maturedWithdrawReserve;
+    }
+
+    /// @notice Token value of one ownership unit, scaled by 1e18; defaults to 1e18 with no units.
+    function pricePerUnit() external view returns (uint256) {
+        return totalUnits == 0 ? 1e18 : (principalAssets() * 1e18) / totalUnits;
+    }
+
+    /// @notice Deposits tokens and mints proportional pool units.
+    /// @dev
+    /// - First deposit mints 1:1 units.
+    /// - Later deposits mint: floor(amount * totalUnits / principalAssets).
+    /// - Floor rounding avoids over-minting; tiny deposits that would mint 0 units revert.
+    function deposit(uint256 amount) external nonReentrant returns (uint256 mintedUnits) {
+        if (amount == 0) {
+            revert InvalidAmount();
+        }
+
+        // Settle the caller's reward index first so the mint does not dilute accrued rewards.
+        _claimPendingRewards(msg.sender);
+
+        uint256 assetsBefore = principalAssets();
+        if (totalUnits == 0) {
+            // Orphan-accounted principal with no unit holders must not be captured
+            // by a new first depositor.
+            if (assetsBefore != 0) {
+                revert ZeroUnitsWithPrincipalAssets(assetsBefore);
+            }
+            mintedUnits = amount;
+        } else {
+            // Fix: units can outlive assets (e.g. totalStaked reconciled/synced to 0),
+            // so fail with a typed error instead of a division-by-zero panic.
+            if (assetsBefore == 0) {
+                revert EmptyPool();
+            }
+            mintedUnits = (amount * totalUnits) / assetsBefore;
+        }
+
+        if (mintedUnits == 0) {
+            revert ZeroMintedUnits();
+        }
+
+        if (!bondToken.transferFrom(msg.sender, address(this), amount)) {
+            revert TokenTransferFromFailed();
+        }
+
+        stakeablePrincipalLedger += amount;
+        unitsOf[msg.sender] += mintedUnits;
+        totalUnits += mintedUnits;
+        // Checkpoint so freshly minted units do not back-accrue past rewards.
+        rewardDebt[msg.sender] = (unitsOf[msg.sender] * accRewardPerUnit) / PRECISION;
+        _assertReserveInvariant();
+
+        emit Deposit(msg.sender, amount, mintedUnits, totalUnits);
+    }
+
+    /// @notice Requests an async staked-principal withdrawal by burning ownership units now.
+    /// @dev
+    /// - Withdrawal sizing uses bonded `totalStaked` only.
+    /// - Conservative audit policy: withdrawals are blocked unless all principal is bonded
+    ///   (`stakeablePrincipalLedger == 0`).
+    /// - Final payout happens via `claimWithdraw` after maturity.
+    /// - Undelegation source validators are selected internally by staking precompile.
+    function withdraw(uint256 userUnits) external nonReentrant returns (uint256 requestId) {
+        if (userUnits == 0) {
+            revert InvalidUnits();
+        }
+
+        // Settle rewards on the pre-burn unit balance.
+        _claimPendingRewards(msg.sender);
+
+        uint256 userBalanceUnits = unitsOf[msg.sender];
+        if (userUnits > userBalanceUnits || totalUnits == 0) {
+            revert InvalidUnits();
+        }
+        // Audit guard: withdrawals are only permitted while all principal is bonded.
+        uint256 stakeable = stakeablePrincipalLedger;
+        if (userUnits == totalUnits) {
+            if (stakeable > 0) {
+                revert FullExitLeavesNonStakedPrincipal(stakeable);
+            }
+        } else if (stakeable > 0) {
+            revert WithdrawRequiresAllPrincipalBonded(stakeable);
+        }
+
+        // Pro-rata share of bonded principal only; floor rounding favors the pool.
+        uint256 amountOut = (userUnits * totalStaked) / totalUnits;
+        if (amountOut == 0) {
+            revert InvalidAmount();
+        }
+        uint64 currentTime = uint64(block.timestamp);
+        uint256 undelegatedAmount;
+        int64 completionTime;
+        // NOTE(review): external precompile call happens before the state updates below;
+        // reentry is blocked by nonReentrant and the result is strictly validated.
+        (undelegatedAmount,, completionTime) = staking.STAKING_CONTRACT.undelegateFromBondedValidators(
+            address(this),
+            amountOut,
+            maxValidators
+        );
+        if (undelegatedAmount != amountOut) {
+            revert UnexpectedUndelegatedAmount(amountOut, undelegatedAmount);
+        }
+        // Completion time must be positive and not in the past before the uint64 cast.
+        if (completionTime <= 0 || uint64(completionTime) < currentTime) {
+            revert InvalidCompletionTime(completionTime, currentTime);
+        }
+        uint64 maturityTime = uint64(completionTime);
+
+        // Effects: burn units, shrink bonded principal, park the amount in the pending reserve.
+        unitsOf[msg.sender] = userBalanceUnits - userUnits;
+        totalUnits -= userUnits;
+        totalStaked -= amountOut;
+        pendingWithdrawReserve += amountOut;
+        rewardDebt[msg.sender] = (unitsOf[msg.sender] * accRewardPerUnit) / PRECISION;
+        _assertReserveInvariant();
+
+        requestId = nextWithdrawRequestId++;
+        withdrawRequests[requestId] = WithdrawRequest({
+            owner: msg.sender,
+            amountOut: amountOut,
+            maturityTime: maturityTime,
+            reserveMoved: false,
+            claimed: false
+        });
+
+        emit WithdrawRequested(msg.sender, requestId, userUnits, amountOut, maturityTime);
+    }
+
+    /// @notice Claims a matured async withdrawal request.
+    /// @dev
+    /// - On first successful matured claim path, request amount is moved from
+    ///   `pendingWithdrawReserve` to `maturedWithdrawReserve`.
+    /// - Payout consumes `maturedWithdrawReserve` and transfers principal to request owner.
+    function claimWithdraw(uint256 requestId) external nonReentrant returns (uint256 amountOut) {
+        WithdrawRequest storage request = withdrawRequests[requestId];
+        if (request.owner == address(0)) {
+            revert InvalidRequest();
+        }
+        if (request.owner != msg.sender) {
+            revert Unauthorized();
+        }
+        if (request.claimed) {
+            revert RequestAlreadyClaimed();
+        }
+
+        uint64 currentTime = uint64(block.timestamp);
+        if (currentTime < request.maturityTime) {
+            revert RequestNotMatured(request.maturityTime, currentTime);
+        }
+
+        // Lazy reserve migration: the first matured claim attempt moves the amount
+        // from the pending bucket to the matured (claim-ready) bucket.
+        if (!request.reserveMoved) {
+            if (request.amountOut > pendingWithdrawReserve) {
+                revert InsufficientLiquid(request.amountOut, pendingWithdrawReserve);
+            }
+            pendingWithdrawReserve -= request.amountOut;
+            maturedWithdrawReserve += request.amountOut;
+            request.reserveMoved = true;
+            emit WithdrawReserveMoved(requestId, request.amountOut, pendingWithdrawReserve, maturedWithdrawReserve);
+        }
+
+        // Effects before the external transfer (checks-effects-interactions + nonReentrant).
+        request.claimed = true;
+        amountOut = request.amountOut;
+        if (amountOut > maturedWithdrawReserve) {
+            revert InsufficientLiquid(amountOut, maturedWithdrawReserve);
+        }
+        // Payout must not dip into the reward-reserve portion of the liquid balance.
+        uint256 liquidPrincipalBefore = liquidBalance() - rewardReserve;
+        if (amountOut > liquidPrincipalBefore) {
+            revert InsufficientLiquid(amountOut, liquidPrincipalBefore);
+        }
+        maturedWithdrawReserve -= amountOut;
+        if (!bondToken.transfer(msg.sender, amountOut)) {
+            revert TokenTransferFailed();
+        }
+        _assertReserveInvariant();
+
+        emit WithdrawClaimed(msg.sender, requestId, amountOut);
+    }
+
+    /// @notice Delegates available principal liquid to bonded validators via staking precompile.
+    /// @dev Validator selection is owned by the staking precompile's bonded-validator query order. The
+    /// poolrebalancer later corrects drift against its own top-power target set, so exact remainder
+    /// ordering here is not an accounting invariant.
+    /// @dev Increments bonded `totalStaked` only.
+    function stake() external nonReentrant onlyAutomationOrOwner returns (uint256 delegatedAmount) {
+        uint256 liquidBefore = stakeablePrincipalLedger;
+        // Silent no-op below the configured threshold (automation-friendly).
+        if (liquidBefore < minStakeAmount) {
+            return 0;
+        }
+        uint32 validatorsCount;
+        (delegatedAmount, validatorsCount) = staking.STAKING_CONTRACT.delegateToBondedValidators(
+            address(this),
+            liquidBefore,
+            maxValidators
+        );
+
+        // Any undelegated remainder stays in stakeablePrincipalLedger for a later run.
+        stakeablePrincipalLedger -= delegatedAmount;
+        totalStaked += delegatedAmount;
+        _assertReserveInvariant();
+        emit Stake(liquidBefore, delegatedAmount, uint256(validatorsCount), totalStaked);
+    }
+
+    /// @notice Sets bonded `totalStaked` to match staking-side truth (`automationCaller` only).
+    /// @dev Poolrebalancer `CallEVM` uses `automationCaller` as sender. Owner break-glass remains `syncTotalStaked`.
+    function reconcileTotalStaked(uint256 newTotalStaked) external nonReentrant onlyAutomationCaller {
+        uint256 previous = totalStaked;
+        totalStaked = newTotalStaked;
+        emit TotalStakedReconciled(previous, newTotalStaked);
+    }
+
+    /// @notice Claims staking rewards to this contract's liquid balance.
+    /// @dev Callable by owner or automation caller; reverts when no units exist because rewards would have no index owner.
+    /// Does not modify `totalStaked` because rewards are liquid yield, not principal.
+    function harvest() external nonReentrant onlyAutomationOrOwner returns (uint256 harvestedAmount) {
+        if (totalUnits == 0) {
+            revert EmptyPool();
+        }
+
+        uint256 liquidBefore = liquidBalance();
+        bool success = distribution.DISTRIBUTION_CONTRACT.claimRewards(
+            address(this),
+            maxRetrieve
+        );
+        if (!success) {
+            revert HarvestFailed();
+        }
+
+        // NOTE(review): balance-delta measurement attributes any tokens received during
+        // the claim call to rewards — confirm direct transfers cannot race this path.
+        uint256 liquidAfter = liquidBalance();
+        harvestedAmount = liquidAfter > liquidBefore ? liquidAfter - liquidBefore : 0;
+        if (harvestedAmount > 0) {
+            rewardReserve += harvestedAmount;
+            if (totalUnits > 0) {
+                accRewardPerUnit += (harvestedAmount * PRECISION) / totalUnits;
+            }
+            emit RewardIndexUpdated(harvestedAmount, accRewardPerUnit, rewardReserve);
+        }
+        _assertReserveInvariant();
+        emit Harvest(liquidBefore, liquidAfter, harvestedAmount);
+    }
+
+    /// @notice Claims caller's accrued rewards from the reward reserve.
+    /// @dev Uses reward index accounting and does not trigger distribution precompile calls.
+    function claimRewards() external nonReentrant returns (uint256 claimedAmount) {
+        claimedAmount = _claimPendingRewards(msg.sender);
+    }
+
+    /// @dev Pays out `user`'s index-accrued rewards and advances their checkpoint.
+    ///      Returns 0 (after checkpointing) when nothing is owed.
+    function _claimPendingRewards(address user) internal returns (uint256 claimedAmount) {
+        uint256 accumulated = (unitsOf[user] * accRewardPerUnit) / PRECISION;
+        uint256 debt = rewardDebt[user];
+        rewardDebt[user] = accumulated;
+        if (accumulated <= debt) {
+            return 0;
+        }
+
+        claimedAmount = accumulated - debt;
+        // NOTE(review): checked subtraction reverts if the index ever owes more than
+        // rewardReserve holds; relies on harvest() funding the reserve first — confirm.
+        rewardReserve -= claimedAmount;
+        if (!bondToken.transfer(user, claimedAmount)) {
+            revert TokenTransferFailed();
+        }
+        _assertReserveInvariant();
+
+        emit RewardsClaimed(user, claimedAmount);
+    }
+
+    /// @dev Asserts that every liquid-backed ledger is covered by the actual token balance.
+    function _assertReserveInvariant() internal view {
+        uint256 liquid = liquidBalance();
+        if (rewardReserve > liquid) {
+            revert RewardReserveInvariantViolation(rewardReserve, liquid);
+        }
+        // Only liquid reserves are constrained by current liquid balance.
+        // Pending withdraw reserve is intentionally excluded because it is not yet
+        // moved to the matured (liquid, claim-ready) reserve.
+ uint256 reserved = rewardReserve + maturedWithdrawReserve; + if (reserved > liquid) { + revert LiquidReserveInvariantViolation(reserved, liquid); + } + uint256 accountedLiquid = stakeablePrincipalLedger + rewardReserve + maturedWithdrawReserve; + if (accountedLiquid > liquid) { + revert StakeablePrincipalInvariantViolation(accountedLiquid, liquid); + } + } + +} diff --git a/contracts/solidity/pool/README.md b/contracts/solidity/pool/README.md new file mode 100644 index 00000000..4e549bb7 --- /dev/null +++ b/contracts/solidity/pool/README.md @@ -0,0 +1,160 @@ +# CommunityPool contract + +The `CommunityPool` contract is a pooled staking vault for a single bond token. +Users deposit tokens and receive internal ownership units, while the contract +stakes principal through staking precompiles and handles rewards and async withdrawals. + +For **poolrebalancer module** configuration, ABCI ordering, and +`reconcileTotalStaked` behavior, see +[`docs/poolrebalancer/community_pool_runbook.md`](../../../docs/poolrebalancer/community_pool_runbook.md). + +## Goals + +- Keep pool ownership simple (`unitsOf[user] / totalUnits`). +- Separate principal accounting (liquid, bonded, withdraw reserves) from reward accounting. +- Support async withdrawals for staked principal (request now, claim at maturity). +- Keep heavy validator selection logic in precompiles. + +## Main components + +- **Bond token**: `bondToken` (ERC20 representation of chain bond denom). +- **Ownership units**: `unitsOf`, `totalUnits`. +- **Principal accounting**: + - `stakeablePrincipalLedger`: liquid principal available for `stake`. + - `totalStaked`: accounting view of **bonded** delegated principal. + - `pendingWithdrawReserve` / `maturedWithdrawReserve`: async **user** withdraw pipeline. +- **Rewards accounting**: + - `rewardReserve`, `accRewardPerUnit`, `rewardDebt[user]`: index-based reward accrual. + +## Lifecycle + +### 1) Deposit + +`deposit(amount)`: + +- Reverts on `amount == 0`. 
+- Claims caller pending rewards against the current reward index. +- Mints units: + - first deposit: `mintedUnits = amount` + - otherwise: `mintedUnits = floor(amount * totalUnits / principalAssets())` +- Rejects deposit when `totalUnits == 0` but `principalAssets() > 0` (`ZeroUnitsWithPrincipalAssets`), preventing orphan-accounted principal from being captured by a new first depositor. +- `principalAssets()` = `stakeablePrincipalLedger + totalStaked`. +- Reverts with `ZeroMintedUnits()` if floor rounding gives `0`. +- Transfers tokens in and increases `stakeablePrincipalLedger`. + +### 2) Stake + +`stake()`: + +- Callable only by `owner` or `automationCaller`. +- No-op when `stakeablePrincipalLedger < minStakeAmount`. +- Calls staking precompile `delegateToBondedValidators(address(this), liquid, maxValidators)`. +- Validator choice and remainder ordering come from the staking precompile's bonded-validator query order; the poolrebalancer separately targets bonded-by-power order and corrects drift after staking. +- Moves delegated amount from `stakeablePrincipalLedger` to `totalStaked`. + +### 3) Harvest and claim rewards + +`harvest()`: + +- Callable only by `owner` or `automationCaller`. +- Reverts with `EmptyPool()` when `totalUnits == 0`; rewards are not claimed into `rewardReserve` unless they can be distributed through the reward index. +- Calls distribution precompile to claim validator rewards to the contract balance. +- Updates `rewardReserve` and `accRewardPerUnit` for positive harvested rewards. + +`claimRewards()`: + +- Uses reward index delta per user; transfers from `rewardReserve`. + +### 4) Async withdraw (user) + +`withdraw(userUnits)`: + +- Claims caller pending rewards against the current reward index before unit burn. +- `amountOut = userUnits * totalStaked / totalUnits` (**bonded principal only**). +- Conservative pre-audit guard: withdraw is allowed only when all withdraw-relevant principal is bonded. 
+ - Full exit rejects with `FullExitLeavesNonStakedPrincipal(uint256)` when non-staked principal remains. + - Partial withdraw rejects with `WithdrawRequiresAllPrincipalBonded(uint256)` when non-staked principal remains. +- Calls `undelegateFromBondedValidators`; burns units; decreases `totalStaked`; increases `pendingWithdrawReserve`. + +`claimWithdraw(requestId)`: + +- Moves reserve to matured, then pays out after maturity. + +### 5) Total reconcile (automation only) + +`reconcileTotalStaked(newTotalStaked)`: + +- Callable only by **`automationCaller`** (not `owner`). +- Sets bonded accounting to match keeper-computed staking truth. +- Owner may use `syncTotalStaked` for owner-driven bonded-only adjustments. + +## Key view methods + +- `liquidBalance()`: ERC20 balance of the contract. +- `principalLiquid()`: `stakeablePrincipalLedger`. +- `principalAssets()`: `stakeablePrincipalLedger + totalStaked`. +- `pricePerUnit()`: `principalAssets * 1e18 / totalUnits` (or `1e18` if `totalUnits == 0`). +- `totalWithdrawCommitments()`: `pendingWithdrawReserve + maturedWithdrawReserve`. + +## Invariants enforced on state changes + +`_assertReserveInvariant()` (on deposit, stake, reward-claim, withdraw paths, etc.): + +- `rewardReserve <= liquidBalance` +- `rewardReserve + maturedWithdrawReserve <= liquidBalance` +- `stakeablePrincipalLedger + rewardReserve + maturedWithdrawReserve <= liquidBalance` + +`pendingWithdrawReserve` is excluded from liquid checks (principal requested for unbonding, not yet claim-ready). `reconcileTotalStaked` does **not** invoke this invariant (no balance movement). + +## Admin operations + +- `setConfig(...)`, `setAutomationCaller(...)`, `syncTotalStaked(...)`, `transferOwnership(...)`: **`onlyOwner`**. +- `setAutomationCaller`: configures the address that may call `reconcileTotalStaked` and (with owner) `stake` / `harvest`. In production this should be the **poolrebalancer module EVM address** (see runbook). 
+## Poolrebalancer EndBlock automation

+The module calls the pool contract with **`msg.sender =` module EVM address** (same as `automationCaller` on the contract).
+
+### Required configuration
+
+1. `setAutomationCaller(<poolrebalancer module EVM address>)` on CommunityPool.
+2. `poolrebalancer.params.pool_delegator_address =` CommunityPool account (bech32).
+
+### EndBlock order (application)
+
+After **staking** has finished matured unbonding payouts for the block:
+
+1. **Strict**: complete pending redelegations.
+2. **Best-effort**: **`reconcileTotalStaked`**, then **`harvest`**, then **`stake`**, then rebalance processing, then a post-rebalance **`reconcileTotalStaked`** pass on successful rebalance.
+
+See the runbook for halting vs best-effort behavior.
+
+### ACL summary
+
+- `reconcileTotalStaked`: **`automationCaller` only**.
+- `stake`, `harvest`: `owner` or `automationCaller`.
+- `syncTotalStaked`: `owner`.
+
+### Failure symptoms
+
+- `Unauthorized()` on `stake` / `harvest`: `automationCaller` mismatch or wrong sender.
+- `Unauthorized()` on `reconcileTotalStaked`: raised for any sender other than `automationCaller` — **including the owner** — unless automation was deliberately retargeted to that sender.
+
+## Events (indexers)
+
+- `TotalStakedReconciled(previousTotalStaked, newTotalStaked)`.
+
+## Error model (selected)
+
+- Permissions / inputs: `InvalidAmount`, `InvalidUnits`, `InvalidConfig`, `EmptyPool`, `Unauthorized`.
+- Exit safety: `FullExitLeavesNonStakedPrincipal`.
+- External: `UnexpectedUndelegatedAmount`, `InvalidCompletionTime`, `HarvestFailed`.
+- Reserves: `InsufficientLiquid`, `RewardReserveInvariantViolation`, `LiquidReserveInvariantViolation`, `StakeablePrincipalInvariantViolation`.
+
+## Test coverage
+
+- **Foundry** (pool-focused): `contracts/test/pool/CommunityPoolWithdrawStake.t.sol` — run from `contracts/` with Forge (see `foundry.toml` and file headers).
+- **Go** artifact smoke: `contracts/community_pool_test.go`.
+- **Integration** (Ginkgo): `tests/integration/precompiles/communitypool/` — `test_integration.go`, `test_utils.go`, `TEST_ASSUMPTIONS.md`. + - Integration validates request/maturity/claim outputs and invariants. + - Exact ERC20 balance-delta equality for `claimWithdraw` is asserted in Forge (deterministic local mocks). diff --git a/contracts/solidity/precompiles/staking/StakingI.sol b/contracts/solidity/precompiles/staking/StakingI.sol index 3c55d0d9..69feea94 100644 --- a/contracts/solidity/precompiles/staking/StakingI.sol +++ b/contracts/solidity/precompiles/staking/StakingI.sol @@ -174,6 +174,18 @@ interface StakingI { uint256 amount ) external returns (bool success); + /// @dev Defines a method for delegating a total amount across bonded validators. + /// @param delegatorAddress The address of the delegator. + /// @param amount The total amount of bond denomination to delegate. + /// @param maxValidators Max bonded validators to include (first N in precompile order). + /// @return delegatedAmount The total amount actually delegated. + /// @return validatorsUsed Number of validators used for the delegation. + function delegateToBondedValidators( + address delegatorAddress, + uint256 amount, + uint32 maxValidators + ) external returns (uint256 delegatedAmount, uint32 validatorsUsed); + /// @dev Defines a method for performing an undelegation from a delegate and a validator. /// @param delegatorAddress The address of the delegator /// @param validatorAddress The address of the validator @@ -186,6 +198,19 @@ interface StakingI { uint256 amount ) external returns (int64 completionTime); + /// @dev Defines a method for undelegating a total amount across bonded validators. + /// @param delegatorAddress The address of the delegator. + /// @param amount The total amount of bond denomination to undelegate. + /// @param maxValidators Max bonded validators to undelegate from. + /// @return undelegatedAmount The total amount actually undelegated. 
+ /// @return validatorsUsed Number of validators used for the undelegation. + /// @return maturityTime The maximum completion time across internal undelegations. + function undelegateFromBondedValidators( + address delegatorAddress, + uint256 amount, + uint32 maxValidators + ) external returns (uint256 undelegatedAmount, uint32 validatorsUsed, int64 maturityTime); + /// @dev Defines a method for performing a redelegation /// of coins from a delegator and source validator to a destination validator. /// @param delegatorAddress The address of the delegator diff --git a/contracts/test/pool/CommunityPoolHarvest.t.sol b/contracts/test/pool/CommunityPoolHarvest.t.sol new file mode 100644 index 00000000..b214e620 --- /dev/null +++ b/contracts/test/pool/CommunityPoolHarvest.t.sol @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: LGPL-3.0-only +pragma solidity ^0.8.20; + +// Harvest reward-index behavior; distribution precompile mocked with vm.etch. +// Run from repo: +// +// cd contracts && npm ci && forge test --match-contract CommunityPoolHarvestTest + +import {Test} from "forge-std/Test.sol"; +import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +import {CommunityPool} from "../../solidity/pool/CommunityPool.sol"; + +address constant DISTRIBUTION_PRECOMPILE_HARVEST = address(uint160(0x801)); + +contract MockBondHarvest is ERC20 { + constructor() ERC20("Bond", "BOND") {} + + function mint(address to, uint256 value) external { + _mint(to, value); + } +} + +contract MockDistributionHarvest { + MockBondHarvest internal immutable bond; + uint256 internal immutable rewardAmount; + + constructor(MockBondHarvest bond_, uint256 rewardAmount_) { + bond = bond_; + rewardAmount = rewardAmount_; + } + + function claimRewards(address delegatorAddress, uint32) external returns (bool success) { + if (rewardAmount > 0) { + bond.mint(delegatorAddress, rewardAmount); + } + return true; + } +} + +contract CommunityPoolHarvestTest is Test { + MockBondHarvest internal bond; + 
CommunityPool internal pool; + address internal alice = address(0xA11CE); + address internal bob = address(0xB0B); + + function setUp() public { + bond = new MockBondHarvest(); + pool = new CommunityPool(address(bond), 10, 5, 1 ether, address(this)); + } + + function _mockDistributionReward(uint256 rewardAmount) internal { + MockDistributionHarvest distribution = new MockDistributionHarvest(bond, rewardAmount); + vm.etch(DISTRIBUTION_PRECOMPILE_HARVEST, address(distribution).code); + } + + function test_Harvest_revertsWhenPoolEmpty() public { + _mockDistributionReward(10 ether); + + vm.expectRevert(CommunityPool.EmptyPool.selector); + pool.harvest(); + + assertEq(bond.balanceOf(address(pool)), 0); + assertEq(pool.rewardReserve(), 0); + assertEq(pool.accRewardPerUnit(), 0); + } + + function test_Harvest_updatesReserveAndRewardIndexWhenPoolHasUnits() public { + bond.mint(address(this), 100 ether); + bond.approve(address(pool), type(uint256).max); + pool.deposit(100 ether); + + _mockDistributionReward(7 ether); + + uint256 harvestedAmount = pool.harvest(); + + assertEq(harvestedAmount, 7 ether); + assertEq(pool.rewardReserve(), 7 ether); + assertEq(pool.accRewardPerUnit(), 0.07 ether); + + uint256 claimedAmount = pool.claimRewards(); + assertEq(claimedAmount, 7 ether); + assertEq(pool.rewardReserve(), 0); + assertEq(bond.balanceOf(address(this)), 7 ether); + } + + function test_Harvest_zeroRewardLeavesReserveAndIndexUnchanged() public { + bond.mint(address(this), 100 ether); + bond.approve(address(pool), type(uint256).max); + pool.deposit(100 ether); + + _mockDistributionReward(0); + + uint256 indexBefore = pool.accRewardPerUnit(); + uint256 harvestedAmount = pool.harvest(); + + assertEq(harvestedAmount, 0); + assertEq(pool.rewardReserve(), 0); + assertEq(pool.accRewardPerUnit(), indexBefore); + } + +} diff --git a/contracts/test/pool/CommunityPoolWithdrawStake.t.sol b/contracts/test/pool/CommunityPoolWithdrawStake.t.sol new file mode 100644 index 00000000..0dbbc8a7 
--- /dev/null +++ b/contracts/test/pool/CommunityPoolWithdrawStake.t.sol @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: LGPL-3.0-only +pragma solidity ^0.8.20; + +// Withdraw/stake interaction in bonded-only reconcile model; staking precompile mocked (vm.mockCall). +// Run from repo: +// +// cd contracts && npm ci && forge test --match-contract CommunityPoolWithdrawStakeTest + +import {Test} from "forge-std/Test.sol"; +import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +import {CommunityPool} from "../../solidity/pool/CommunityPool.sol"; +import {StakingI} from "../../solidity/precompiles/staking/StakingI.sol"; + +address constant STAKING_PRECOMPILE = address(uint160(0x800)); +address constant DISTRIBUTION_PRECOMPILE = address(uint160(0x801)); + +contract AutomationProxyWS { + function reconcile(CommunityPool pool, uint256 bonded) external { + pool.reconcileTotalStaked(bonded); + } +} + +contract MockBondWS is ERC20 { + constructor() ERC20("Bond", "BOND") {} + + function mint(address to, uint256 value) external { + _mint(to, value); + } +} + +contract MockDistributionWS { + MockBondWS internal immutable bond; + uint256 internal immutable rewardAmount; + + constructor(MockBondWS bond_, uint256 rewardAmount_) { + bond = bond_; + rewardAmount = rewardAmount_; + } + + function claimRewards(address delegatorAddress, uint32) external returns (bool success) { + if (rewardAmount > 0) { + bond.mint(delegatorAddress, rewardAmount); + } + return true; + } +} + +contract CommunityPoolWithdrawStakeTest is Test { + MockBondWS internal bond; + CommunityPool internal pool; + AutomationProxyWS internal automation; + address internal alice = address(0xA11CE); + address internal bob = address(0xB0B); + + function setUp() public { + bond = new MockBondWS(); + pool = new CommunityPool(address(bond), 10, 5, 1 ether, address(this)); + automation = new AutomationProxyWS(); + pool.setAutomationCaller(address(automation)); + _mockDistributionNoReward(); + } + + function 
_mockDistributionNoReward() internal { + MockDistributionWS distribution = new MockDistributionWS(bond, 0); + vm.etch(DISTRIBUTION_PRECOMPILE, address(distribution).code); + } + + function _mockDistributionReward(uint256 rewardAmount) internal { + MockDistributionWS distribution = new MockDistributionWS(bond, rewardAmount); + vm.etch(DISTRIBUTION_PRECOMPILE, address(distribution).code); + } + + function _mockUndelegate(address poolAddr, uint256 amountOut, uint32 maxValidators, int64 maturityTime) internal { + bytes memory callData = abi.encodeCall( + StakingI.undelegateFromBondedValidators, + (poolAddr, amountOut, maxValidators) + ); + bytes memory ret = abi.encode(amountOut, uint32(1), maturityTime); + vm.mockCall(STAKING_PRECOMPILE, callData, ret); + } + + function _mockDelegate( + address poolAddr, + uint256 liquidAmount, + uint32 maxValidators, + uint256 delegatedAmount, + uint32 validatorsUsed + ) internal { + bytes memory callData = abi.encodeCall( + StakingI.delegateToBondedValidators, + (poolAddr, liquidAmount, maxValidators) + ); + bytes memory ret = abi.encode(delegatedAmount, validatorsUsed); + vm.mockCall(STAKING_PRECOMPILE, callData, ret); + } + + function test_PrincipalAssets_EqualsStakeablePlusBonded() public { + bond.mint(address(this), 200 ether); + bond.approve(address(pool), type(uint256).max); + pool.deposit(120 ether); + assertEq(pool.principalAssets(), 120 ether); + + _mockDelegate(address(pool), 120 ether, pool.maxValidators(), 100 ether, 2); + pool.stake(); + assertEq(pool.stakeablePrincipalLedger(), 20 ether); + assertEq(pool.totalStaked(), 100 ether); + assertEq(pool.principalAssets(), 120 ether); + } + + function test_Withdraw_allowsFullExitWhenNoNonStakedPrincipalRemains() public { + bond.mint(address(this), 200 ether); + bond.approve(address(pool), type(uint256).max); + pool.deposit(200 ether); + bond.mint(address(pool), 500 ether); + automation.reconcile(pool, 100 ether); + + _mockDelegate(address(pool), 200 ether, 
pool.maxValidators(), 200 ether, 2); + pool.stake(); + + uint256 fullUnits = pool.totalUnits(); + uint256 amountOut = (fullUnits * pool.totalStaked()) / pool.totalUnits(); + _mockUndelegate( + address(pool), + amountOut, + pool.maxValidators(), + int64(uint64(block.timestamp + 86_400)) + ); + pool.withdraw(fullUnits); + + assertEq(pool.totalUnits(), 0); + assertEq(pool.stakeablePrincipalLedger(), 0); + } + + function test_Withdraw_revertsWhenStakeablePrincipalLedgerIsNonZero() public { + bond.mint(address(this), 200 ether); + bond.approve(address(pool), type(uint256).max); + pool.deposit(200 ether); + vm.expectRevert(abi.encodeWithSelector(CommunityPool.WithdrawRequiresAllPrincipalBonded.selector, 200 ether)); + pool.withdraw(100 ether); + } + + function test_Stake_movesStakeableToBonded() public { + bond.mint(address(this), 300 ether); + bond.approve(address(pool), type(uint256).max); + pool.deposit(100 ether); + assertEq(pool.stakeablePrincipalLedger(), 100 ether); + + _mockDelegate(address(pool), 100 ether, pool.maxValidators(), 100 ether, 3); + pool.stake(); + + assertEq(pool.totalStaked(), 100 ether); + assertEq(pool.stakeablePrincipalLedger(), 0); + + bond.mint(address(this), 20 ether); + pool.deposit(20 ether); + + _mockDelegate(address(pool), 20 ether, pool.maxValidators(), 20 ether, 1); + pool.stake(); + + assertEq(pool.totalStaked(), 120 ether); + assertEq(pool.stakeablePrincipalLedger(), 0); + } + + function test_Stake_whenTotalUnitsZero_noopsWithEmptyLedger() public { + assertEq(pool.totalUnits(), 0); + assertEq(pool.stakeablePrincipalLedger(), 0); + assertEq(pool.totalStaked(), 0); + + uint256 delegatedAmount = pool.stake(); + + assertEq(delegatedAmount, 0); + assertEq(pool.totalUnits(), 0); + assertEq(pool.stakeablePrincipalLedger(), 0); + assertEq(pool.totalStaked(), 0); + } + + function test_ReconcileTotalStaked_onlyAutomationCaller() public { + vm.expectRevert(abi.encodeWithSelector(CommunityPool.Unauthorized.selector)); + 
pool.reconcileTotalStaked(1 ether); + + automation.reconcile(pool, 9 ether); + assertEq(pool.totalStaked(), 9 ether); + } + + function test_SyncTotalStaked_ownerOnly() public { + pool.syncTotalStaked(7 ether); + assertEq(pool.totalStaked(), 7 ether); + } + + function test_ClaimWithdraw_MaturityFlow_MovesReservesAndPaysOut() public { + _mockDistributionNoReward(); + + bond.mint(alice, 150 ether); + vm.startPrank(alice); + bond.approve(address(pool), type(uint256).max); + pool.deposit(150 ether); + vm.stopPrank(); + + _mockDelegate(address(pool), 150 ether, pool.maxValidators(), 150 ether, 2); + pool.stake(); + + uint256 userUnits = pool.unitsOf(alice); + uint256 amountOut = (userUnits * pool.totalStaked()) / pool.totalUnits(); + int64 maturity = int64(uint64(block.timestamp + 2 days)); + _mockUndelegate(address(pool), amountOut, pool.maxValidators(), maturity); + + vm.prank(alice); + uint256 requestId = pool.withdraw(userUnits); + + assertEq(pool.pendingWithdrawReserve(), amountOut); + assertEq(pool.maturedWithdrawReserve(), 0); + + vm.warp(uint64(maturity) + 1); + + uint256 balBefore = bond.balanceOf(alice); + vm.prank(alice); + uint256 claimed = pool.claimWithdraw(requestId); + uint256 balAfter = bond.balanceOf(alice); + + assertEq(claimed, amountOut); + assertEq(balAfter - balBefore, amountOut); + assertEq(pool.pendingWithdrawReserve(), 0); + assertEq(pool.maturedWithdrawReserve(), 0); + } +} diff --git a/docs/poolrebalancer/community_pool_runbook.md b/docs/poolrebalancer/community_pool_runbook.md new file mode 100644 index 00000000..84eb2c5e --- /dev/null +++ b/docs/poolrebalancer/community_pool_runbook.md @@ -0,0 +1,74 @@ +# Pool Rebalancer + CommunityPool Runbook + +This runbook documents the **redelegation-only** rebalancer model and the +corresponding CommunityPool automation behavior. + +For contract API details, see +[`contracts/solidity/pool/README.md`](../../contracts/solidity/pool/README.md). 
+ +## Model + +- Rebalancing is performed through **redelegations only**. +- EndBlock automation for CommunityPool uses: + - `reconcileTotalStaked(uint256)` (automation caller only), + - `harvest()`, + - `stake()`. +- User withdrawals remain unchanged and still use the contract withdraw cycle. + +## Required Configuration + +- `poolrebalancer.params.pool_delegator_address` must be the CommunityPool + bech32 account. +- CommunityPool `automationCaller` must be set to the poolrebalancer module EVM + address. +- Rebalancer tuning params: + `max_target_validators`, `rebalance_threshold_bp`, `max_ops_per_block`, + `max_move_per_op`. + +## Accounting Invariants + +- `totalStaked` is the bonded delegated principal. +- `stakeablePrincipalLedger` is liquid principal available for stake. +- `principalAssets()` is expected to remain: + `stakeablePrincipalLedger + totalStaked`. +- `withdraw()` is guarded when `stakeablePrincipalLedger > 0`, so users cannot + withdraw while principal is still liquid and not fully bonded. + +## EndBlock Flow + +1. Complete matured pending redelegation tracking. +2. Best-effort CommunityPool reconcile via `reconcileTotalStaked`. +3. Best-effort CommunityPool automation (`harvest` then `stake`). +4. Best-effort `ProcessRebalance`. +5. Optional post-rebalance best-effort second reconcile pass via `reconcileTotalStaked` + (enabled by test hook, disabled by default in production keeper). + +Strict failures only apply to strict keeper phases. Reconcile/automation/rebalance +remain best-effort and are retried on later blocks. + +## Monitoring + +Primary signals: + +- Module logs around `process rebalance`, `community pool reconcile`, + and `community pool automation`. +- `evmd query poolrebalancer pending-redelegations`. +- CommunityPool views: + - `totalStaked()` + - `stakeablePrincipalLedger()` + - `principalAssets()` + +Common issues: + +- `Unauthorized` reverts on pool automation calls: automation caller mismatch. 
+- Reconcile drift: compare delegated bonded total vs `totalStaked` and verify + `reconcileTotalStaked` is being called from the module EVM address. + +## Related Files + +- `x/poolrebalancer/abci.go` +- `x/poolrebalancer/keeper/rebalance.go` +- `x/poolrebalancer/keeper/community_pool_reconcile.go` +- `x/poolrebalancer/keeper/community_pool_reconcile_abci.go` +- `x/poolrebalancer/keeper/community_pool.go` +- `contracts/solidity/pool/CommunityPool.sol` diff --git a/evmd/app.go b/evmd/app.go index 80d793ee..7c396ee9 100644 --- a/evmd/app.go +++ b/evmd/app.go @@ -38,6 +38,9 @@ import ( "github.com/cosmos/evm/x/ibc/transfer" transferkeeper "github.com/cosmos/evm/x/ibc/transfer/keeper" transferv2 "github.com/cosmos/evm/x/ibc/transfer/v2" + "github.com/cosmos/evm/x/poolrebalancer" + poolrebalancerkeeper "github.com/cosmos/evm/x/poolrebalancer/keeper" + poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types" "github.com/cosmos/evm/x/precisebank" precisebankkeeper "github.com/cosmos/evm/x/precisebank/keeper" precisebanktypes "github.com/cosmos/evm/x/precisebank/types" @@ -87,6 +90,7 @@ import ( servertypes "github.com/cosmos/cosmos-sdk/server/types" testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/bech32" sdkmempool "github.com/cosmos/cosmos-sdk/types/mempool" "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/types/msgservice" @@ -190,11 +194,12 @@ type EVMD struct { CallbackKeeper ibccallbackskeeper.ContractKeeper // Cosmos EVM keepers - FeeMarketKeeper feemarketkeeper.Keeper - EVMKeeper *evmkeeper.Keeper - Erc20Keeper erc20keeper.Keeper - PreciseBankKeeper precisebankkeeper.Keeper - EVMMempool *evmmempool.ExperimentalEVMMempool + FeeMarketKeeper feemarketkeeper.Keeper + EVMKeeper *evmkeeper.Keeper + Erc20Keeper erc20keeper.Keeper + PreciseBankKeeper precisebankkeeper.Keeper + PoolRebalancerKeeper poolrebalancerkeeper.Keeper + EVMMempool 
*evmmempool.ExperimentalEVMMempool // the module manager ModuleManager *module.Manager @@ -247,8 +252,12 @@ func NewExampleApp( ibcexported.StoreKey, ibctransfertypes.StoreKey, // Cosmos EVM store keys evmtypes.StoreKey, feemarkettypes.StoreKey, erc20types.StoreKey, precisebanktypes.StoreKey, + poolrebalancertypes.StoreKey, + ) + tkeys := storetypes.NewTransientStoreKeys( + paramstypes.TStoreKey, + poolrebalancertypes.TransientStoreKey, ) - tkeys := storetypes.NewTransientStoreKeys(paramstypes.TStoreKey) oKeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey, evmtypes.ObjectKey) var nonTransientKeys []storetypes.StoreKey @@ -292,8 +301,14 @@ func NewExampleApp( // Register subspace for PoA so GetSubspace(poatypes.ModuleName) returns a valid subspace. app.ParamsKeeper.Subspace(poatypes.ModuleName).WithKeyTable(poatypes.ParamKeyTable()) - // get authority address - authAddr := authtypes.NewModuleAddress(govtypes.ModuleName).String() + // Authority string must use the current Bech32 prefix. Do not use AccAddress.String() here: the SDK + // address cache is keyed only by raw bytes, so if another package encoded the same module address + // under a different prefix first, String() would return the stale prefix. 
+ govModAddr := authtypes.NewModuleAddress(govtypes.ModuleName) + authAddr, authAddrErr := bech32.ConvertAndEncode(sdk.GetConfig().GetBech32AccountAddrPrefix(), govModAddr) + if authAddrErr != nil { + panic(authAddrErr) + } // set the BaseApp's parameter store app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper( @@ -497,6 +512,18 @@ func NewExampleApp( ), ) + app.PoolRebalancerKeeper = poolrebalancerkeeper.NewKeeper( + appCodec, + runtime.NewKVStoreService(keys[poolrebalancertypes.StoreKey]), + tkeys[poolrebalancertypes.TransientStoreKey], + app.StakingKeeper, + stakingkeeper.NewQuerier(app.StakingKeeper), + app.DistrKeeper, + authtypes.NewModuleAddress(govtypes.ModuleName), + app.EVMKeeper, + app.AccountKeeper, + ) + app.Erc20Keeper = erc20keeper.NewKeeper( keys[erc20types.StoreKey], appCodec, @@ -599,6 +626,7 @@ func NewExampleApp( slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil, app.interfaceRegistry), distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil), staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, nil), + poolrebalancer.NewAppModule(app.PoolRebalancerKeeper), poa.NewAppModule(appCodec, app.PoaKeeper, app.BankKeeper, app.StakingKeeper, app.AccountKeeper, app.interfaceRegistry), upgrade.NewAppModule(app.UpgradeKeeper, app.AccountKeeper.AddressCodec()), evidence.NewAppModule(app.EvidenceKeeper), @@ -657,7 +685,13 @@ func NewExampleApp( // TODO: remove no-ops? check if all are no-ops before removing distrtypes.ModuleName, slashingtypes.ModuleName, - evidencetypes.ModuleName, stakingtypes.ModuleName, + // Slashing and evidence BeginBlock can change staking balances in the same block. + // Poolrebalancer BeginBlock snapshots validator slash state, so it runs after both modules. 
+ // Staking BeginBlock (x/staking BeginBlocker) only persists/prunes HistoricalInfo, so ordering + // staking after poolrebalancer here does not affect slash-snapshot correctness. + evidencetypes.ModuleName, + poolrebalancertypes.ModuleName, + stakingtypes.ModuleName, authtypes.ModuleName, banktypes.ModuleName, govtypes.ModuleName, genutiltypes.ModuleName, authz.ModuleName, feegrant.ModuleName, consensusparamtypes.ModuleName, @@ -671,6 +705,7 @@ func NewExampleApp( banktypes.ModuleName, govtypes.ModuleName, stakingtypes.ModuleName, + poolrebalancertypes.ModuleName, // after staking; rebalances pool delegator stake poatypes.ModuleName, authtypes.ModuleName, @@ -705,6 +740,7 @@ func NewExampleApp( feemarkettypes.ModuleName, erc20types.ModuleName, precisebanktypes.ModuleName, + poolrebalancertypes.ModuleName, ibctransfertypes.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, @@ -951,6 +987,14 @@ func (app *EVMD) GetKey(storeKey string) *storetypes.KVStoreKey { return app.keys[storeKey] } +// GetTKey returns the TransientStoreKey for the provided store key. +// +// NOTE: Same intent as GetKey—primarily for tests and helpers that must build module keepers with the +// app's real store keys (e.g. integration suites wiring poolrebalancer.Keeper next to the app). +func (app *EVMD) GetTKey(storeKey string) *storetypes.TransientStoreKey { + return app.tkeys[storeKey] +} + // GetSubspace returns a params subspace for a given module name. 
func (app *EVMD) GetSubspace(moduleName string) paramstypes.Subspace { subspace, ok := app.ParamsKeeper.GetSubspace(moduleName) diff --git a/evmd/app_begin_block_order_test.go b/evmd/app_begin_block_order_test.go new file mode 100644 index 00000000..1729badd --- /dev/null +++ b/evmd/app_begin_block_order_test.go @@ -0,0 +1,98 @@ +package evmd + +import ( + "os" + "sync" + "testing" + + "cosmossdk.io/log" + evidencetypes "cosmossdk.io/x/evidence/types" + dbm "github.com/cosmos/cosmos-db" + + srvflags "github.com/cosmos/evm/server/flags" + "github.com/cosmos/evm/testutil/constants" + poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/client/flags" + simutils "github.com/cosmos/cosmos-sdk/testutil/sims" + paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/stretchr/testify/require" + + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +func beginBlockModuleIndex(order []string, moduleName string) int { + for i, name := range order { + if name == moduleName { + return i + } + } + return -1 +} + +var ( + orderTestAppOnce sync.Once + orderTestApp *EVMD + orderTestAppErr error +) + +func getOrderTestApp() (*EVMD, error) { + orderTestAppOnce.Do(func() { + home, err := os.MkdirTemp("", "evmd-block-order") + if err != nil { + orderTestAppErr = err + return + } + + orderTestApp = NewExampleApp( + log.NewNopLogger(), + dbm.NewMemDB(), + nil, + true, + simutils.AppOptionsMap{ + flags.FlagHome: home, + srvflags.EVMChainID: constants.EighteenDecimalsChainID, + }, + baseapp.SetChainID(constants.ExampleChainID.ChainID), + ) + }) + return orderTestApp, orderTestAppErr +} + +// TestBeginBlockOrder_PoolRebalancerAfterSlashingAndEvidence guards the ordering required for +// previous-block slash snapshot correctness: slashing and evidence BeginBlock may update staking +// state, so poolrebalancer 
must snapshot slash signals after both. Staking runs after poolrebalancer +// here; x/staking BeginBlocker only tracks HistoricalInfo, so this relative order does not affect +// slash-snapshot correctness. +func TestBeginBlockOrder_PoolRebalancerAfterSlashingAndEvidence(t *testing.T) { + app, err := getOrderTestApp() + require.NoError(t, err) + + order := app.ModuleManager.OrderBeginBlockers + require.NotEmpty(t, order) + require.NotNil(t, app.GetTKey(paramstypes.TStoreKey), "params transient key must be mounted") + require.NotNil( + t, + app.GetTKey(poolrebalancertypes.TransientStoreKey), + "poolrebalancer transient key must be mounted", + ) + + iSlash := beginBlockModuleIndex(order, slashingtypes.ModuleName) + iEvidence := beginBlockModuleIndex(order, evidencetypes.ModuleName) + iPool := beginBlockModuleIndex(order, poolrebalancertypes.ModuleName) + iStake := beginBlockModuleIndex(order, stakingtypes.ModuleName) + + require.NotEqual(t, -1, iSlash, "slashing must be in OrderBeginBlockers") + require.NotEqual(t, -1, iEvidence, "evidence must be in OrderBeginBlockers") + require.NotEqual(t, -1, iPool, "poolrebalancer must be in OrderBeginBlockers") + require.NotEqual(t, -1, iStake, "staking must be in OrderBeginBlockers") + + require.Less(t, iSlash, iEvidence, + "slashing must run before evidence (downtime vs equivocation slash ordering)") + require.Less(t, iEvidence, iPool, + "equivocation evidence updates validator slash state in BeginBlock; poolrebalancer snapshot must follow") + require.Less(t, iPool, iStake, + "app orders poolrebalancer before staking BeginBlock; staking BeginBlock does not alter slash snapshot inputs") +} diff --git a/evmd/app_end_block_order_test.go b/evmd/app_end_block_order_test.go new file mode 100644 index 00000000..91312ade --- /dev/null +++ b/evmd/app_end_block_order_test.go @@ -0,0 +1,37 @@ +package evmd + +import ( + "testing" + + poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types" + 
"github.com/stretchr/testify/require" + + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +func endBlockModuleIndex(order []string, moduleName string) int { + for i, name := range order { + if name == moduleName { + return i + } + } + return -1 +} + +// TestEndBlockOrder_StakingBeforePoolRebalancer guards the ordering required for +// poolrebalancer EndBlock reconciliation from staking truth. +func TestEndBlockOrder_StakingBeforePoolRebalancer(t *testing.T) { + app, err := getOrderTestApp() + require.NoError(t, err) + + order := app.ModuleManager.OrderEndBlockers + require.NotEmpty(t, order) + + iStake := endBlockModuleIndex(order, stakingtypes.ModuleName) + iPool := endBlockModuleIndex(order, poolrebalancertypes.ModuleName) + + require.NotEqual(t, -1, iStake, "staking must be in OrderEndBlockers") + require.NotEqual(t, -1, iPool, "poolrebalancer must be in OrderEndBlockers") + require.Less(t, iStake, iPool, + "staking EndBlock must run before poolrebalancer EndBlock for reconcile correctness") +} diff --git a/evmd/config/permissions.go b/evmd/config/permissions.go index 79a3120a..6815045e 100644 --- a/evmd/config/permissions.go +++ b/evmd/config/permissions.go @@ -12,6 +12,7 @@ import ( cosmosevmutils "github.com/cosmos/evm/utils" erc20types "github.com/cosmos/evm/x/erc20/types" feemarkettypes "github.com/cosmos/evm/x/feemarket/types" + poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types" precisebanktypes "github.com/cosmos/evm/x/precisebank/types" vmtypes "github.com/cosmos/evm/x/vm/types" transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" @@ -66,6 +67,7 @@ var maccPerms = map[string][]string{ vmtypes.ModuleName: {authtypes.Minter, authtypes.Burner}, feemarkettypes.ModuleName: nil, erc20types.ModuleName: {authtypes.Minter, authtypes.Burner}, + poolrebalancertypes.ModuleName: nil, precisebanktypes.ModuleName: {authtypes.Minter, authtypes.Burner}, } diff --git 
a/evmd/tests/integration/precompiles/communitypool/precompile_communitypool_test.go b/evmd/tests/integration/precompiles/communitypool/precompile_communitypool_test.go new file mode 100644 index 00000000..5456a9d7 --- /dev/null +++ b/evmd/tests/integration/precompiles/communitypool/precompile_communitypool_test.go @@ -0,0 +1,15 @@ +package communitypool + +import ( + "testing" + + evm "github.com/cosmos/evm" + "github.com/cosmos/evm/evmd/tests/integration" + communitypooltests "github.com/cosmos/evm/tests/integration/precompiles/communitypool" + testapp "github.com/cosmos/evm/testutil/app" +) + +func TestCommunityPoolPrecompileIntegrationTestSuite(t *testing.T) { + create := testapp.ToEvmAppCreator[evm.Erc20IntegrationApp](integration.CreateEvmd, "evm.Erc20IntegrationApp") + communitypooltests.TestCommunityPoolIntegrationSuite(t, create) +} diff --git a/evmd/tests/integration/x_poolrebalancer_test.go b/evmd/tests/integration/x_poolrebalancer_test.go new file mode 100644 index 00000000..9a27dadd --- /dev/null +++ b/evmd/tests/integration/x_poolrebalancer_test.go @@ -0,0 +1,22 @@ +// Pool rebalancer integration tests must be built with -tags=test (singular) so x/evm’s test-only +// EVMConfigurator.ResetTestConfig is included (see x/vm/types/config.go). 
+// +// Example: go test -tags=test ./evmd/tests/integration -run TestPoolRebalancerKeeperIntegrationTestSuite -count=1 +package integration + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + evm "github.com/cosmos/evm" + testapp "github.com/cosmos/evm/testutil/app" + + poolrebalancer "github.com/cosmos/evm/tests/integration/x/poolrebalancer" +) + +func TestPoolRebalancerKeeperIntegrationTestSuite(t *testing.T) { + create := testapp.ToEvmAppCreator[evm.IntegrationNetworkApp](CreateEvmd, "evm.IntegrationNetworkApp") + s := poolrebalancer.NewKeeperIntegrationTestSuite(create) + suite.Run(t, s) +} diff --git a/interfaces.go b/interfaces.go index 21e3d4db..cd4f0923 100644 --- a/interfaces.go +++ b/interfaces.go @@ -45,6 +45,7 @@ type TestApp interface { ChainID() string DefaultGenesis() map[string]json.RawMessage GetKey(storeKey string) *storetypes.KVStoreKey + GetTKey(storeKey string) *storetypes.TransientStoreKey GetBaseApp() *baseapp.BaseApp LastCommitID() storetypes.CommitID LastBlockHeight() int64 @@ -132,6 +133,8 @@ type ( } KeyProvider interface { GetKey(storeKey string) *storetypes.KVStoreKey + // GetTKey returns a registered TransientStoreKey (e.g. poolrebalancer per-block snapshot store). 
+ GetTKey(storeKey string) *storetypes.TransientStoreKey } MempoolProvider interface { GetMempool() mempool.ExtMempool diff --git a/multi_node_startup.sh b/multi_node_startup.sh index 211c84c4..0cd665d7 100755 --- a/multi_node_startup.sh +++ b/multi_node_startup.sh @@ -7,26 +7,35 @@ KEYALGO="eth_secp256k1" LOGLEVEL="info" BASEFEE=10000000 BASEDIR="${BASEDIR:-"$HOME/.og-evm-devnet"}" +VALIDATOR_COUNT="${VALIDATOR_COUNT:-3}" +DEV_ACCOUNT_COUNT="${DEV_ACCOUNT_COUNT:-10}" +SCRIPT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")" NODE_NUMBER="${NODE_NUMBER:-}" START_VALIDATOR="${START_VALIDATOR:-false}" +START_ALL_VALIDATORS="${START_ALL_VALIDATORS:-false}" GENERATE_GENESIS="${GENERATE_GENESIS:-false}" -VAL0_MNEMONIC="" -VAL1_MNEMONIC="" -VAL2_MNEMONIC="" - get_p2p_port() { echo $((26656 + ($1 * 100))); } get_rpc_port() { echo $((26657 + ($1 * 100))); } get_grpc_port() { echo $((9090 + ($1 * 10))); } get_jsonrpc_port() { echo $((8545 + ($1 * 10))); } get_val_mnemonic() { - case $1 in - 0) echo "$VAL0_MNEMONIC" ;; - 1) echo "$VAL1_MNEMONIC" ;; - 2) echo "$VAL2_MNEMONIC" ;; - esac + local idx="$1" + local var_name="VAL${idx}_MNEMONIC" + echo "${!var_name:-}" +} + +auto_generate_validator_mnemonic() { + local idx="$1" + local tmp_home key_name out mnemonic + tmp_home="$(mktemp -d "${TMPDIR:-/tmp}/multi-node-mnemonic-${idx}-XXXXXX")" + key_name="autoval${idx}" + out="$(evmd keys add "$key_name" --keyring-backend test --algo "$KEYALGO" --home "$tmp_home" 2>&1 || true)" + mnemonic="$(echo "$out" | awk 'NF{line=$0} END{print line}')" + rm -rf "$tmp_home" + echo "$mnemonic" } get_home_dir() { echo "$BASEDIR/val$1"; } @@ -39,9 +48,12 @@ usage() { echo "Usage: $0 [options]" echo "" echo "Environment Variables:" - echo " GENERATE_GENESIS=true Generate genesis for all 3 validators" + echo " GENERATE_GENESIS=true Generate genesis for all validators" echo " START_VALIDATOR=true Start a validator" - echo " NODE_NUMBER=0|1|2 Which validator to start" 
+ echo " START_ALL_VALIDATORS=true Start all validators (val0 foreground, others background)" + echo " NODE_NUMBER=0..N-1 Which validator to start" + echo " VALIDATOR_COUNT=3 Validator count for genesis/startup" + echo " DEV_ACCOUNT_COUNT=10 Number of funded dev accounts to generate" echo " BASEDIR=path Base directory (default: ~/.og-evm-devnet)" echo "" echo "Options:" @@ -94,6 +106,11 @@ apply_config_customizations() { local RPC_PORT=$(get_rpc_port $NODE_NUM) local GRPC_PORT=$(get_grpc_port $NODE_NUM) local JSONRPC_PORT=$(get_jsonrpc_port $NODE_NUM) + local PROM_PORT=$((26660 + NODE_NUM)) + local PPROF_PORT=$((6060 + NODE_NUM)) + local WS_PORT=$((8546 + NODE_NUM)) + local GETH_METRICS_PORT=$((8100 + NODE_NUM)) + local EVM_METRICS_PORT=$((6065 + NODE_NUM)) sed -i.bak 's/timeout_propose = "3s"/timeout_propose = "2s"/g' "$CONFIG_TOML" sed -i.bak 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "200ms"/g' "$CONFIG_TOML" @@ -108,6 +125,10 @@ apply_config_customizations() { sed -i.bak "s|laddr = \"tcp://0.0.0.0:26656\"|laddr = \"tcp://0.0.0.0:${P2P_PORT}\"|g" "$CONFIG_TOML" sed -i.bak 's/prometheus = false/prometheus = true/' "$CONFIG_TOML" + sed -i.bak 's/addr_book_strict = true/addr_book_strict = false/' "$CONFIG_TOML" + sed -i.bak 's/allow_duplicate_ip = false/allow_duplicate_ip = true/' "$CONFIG_TOML" + sed -i.bak "s|prometheus_listen_addr = \":26660\"|prometheus_listen_addr = \":${PROM_PORT}\"|g" "$CONFIG_TOML" + sed -i.bak "s|pprof_laddr = \"localhost:6060\"|pprof_laddr = \"localhost:${PPROF_PORT}\"|g" "$CONFIG_TOML" sed -i.bak 's/prometheus-retention-time = "0"/prometheus-retention-time = "1000000000000"/g' "$APP_TOML" sed -i.bak 's/enabled = false/enabled = true/g' "$APP_TOML" sed -i.bak 's/enable = false/enable = true/g' "$APP_TOML" @@ -119,6 +140,10 @@ apply_config_customizations() { sed -i.bak "s|address = \"127.0.0.1:8545\"|address = \"0.0.0.0:${JSONRPC_PORT}\"|g" "$APP_TOML" sed -i.bak "s|address = \"0.0.0.0:8545\"|address = 
\"0.0.0.0:${JSONRPC_PORT}\"|g" "$APP_TOML" + sed -i.bak "s|geth-metrics-address = \"127.0.0.1:8100\"|geth-metrics-address = \"127.0.0.1:${GETH_METRICS_PORT}\"|g" "$APP_TOML" + sed -i.bak "s|ws-address = \"127.0.0.1:8546\"|ws-address = \"127.0.0.1:${WS_PORT}\"|g" "$APP_TOML" + sed -i.bak "s|metrics-address = \"127.0.0.1:6065\"|metrics-address = \"127.0.0.1:${EVM_METRICS_PORT}\"|g" "$APP_TOML" + rm -f "$CONFIG_TOML.bak" "$APP_TOML.bak" } @@ -130,7 +155,7 @@ set_persistent_peers() { local CONFIG_TOML="$HOME_DIR/config/config.toml" local PEERS="" - for i in 0 1 2; do + for i in $(seq 0 $((VALIDATOR_COUNT - 1))); do if [[ $i -ne $NODE_NUM ]]; then local PEER_PORT=$(get_p2p_port $i) if [[ -n "$PEERS" ]]; then @@ -189,8 +214,13 @@ generate_dev_accounts() { } generate_genesis() { + if [[ ! "$DEV_ACCOUNT_COUNT" =~ ^[0-9]+$ ]] || (( DEV_ACCOUNT_COUNT < 1 )); then + echo "Error: DEV_ACCOUNT_COUNT must be a positive integer (got: $DEV_ACCOUNT_COUNT)" >&2 + exit 1 + fi + echo "==========================================" - echo "Generating genesis for 3 validators..." + echo "Generating genesis for $VALIDATOR_COUNT validators..." echo "Base directory: $BASEDIR" echo "==========================================" @@ -212,10 +242,20 @@ generate_genesis() { echo "" echo ">>> Step 1: Initializing all validators..." - for i in 0 1 2; do + for i in $(seq 0 $((VALIDATOR_COUNT - 1))); do HOME_DIR=$(get_home_dir $i) MNEMONIC=$(get_val_mnemonic $i) VALKEY="val${i}" + if [[ -z "$MNEMONIC" ]]; then + MNEMONIC="$(auto_generate_validator_mnemonic "$i")" + if [[ -z "$MNEMONIC" ]]; then + echo "Error: VAL${i}_MNEMONIC is required for validator $i" >&2 + exit 1 + fi + export "VAL${i}_MNEMONIC=$MNEMONIC" + echo "Auto-generated mnemonic for validator $i" + fi + echo "--- Initializing validator $i at $HOME_DIR ---" @@ -238,7 +278,7 @@ generate_genesis() { echo "" echo ">>> Step 3: Adding all validator accounts with initial balances..." 
- for i in 0 1 2; do + for i in $(seq 0 $((VALIDATOR_COUNT - 1))); do VALKEY="val${i}" VAL_HOME=$(get_home_dir $i) @@ -250,18 +290,18 @@ generate_genesis() { echo "" echo ">>> Step 4: Generating dev accounts..." - generate_dev_accounts 10 "$(get_home_dir 0)" + generate_dev_accounts "$DEV_ACCOUNT_COUNT" "$(get_home_dir 0)" echo "" echo ">>> Step 5: Copying genesis to all validators..." - for i in 1 2; do + for i in $(seq 1 $((VALIDATOR_COUNT - 1))); do cp "$GENESIS" "$(get_home_dir $i)/config/genesis.json" echo "Copied genesis to val$i" done echo "" echo ">>> Step 6: Creating gentx for each validator..." - for i in 0 1 2; do + for i in $(seq 0 $((VALIDATOR_COUNT - 1))); do HOME_DIR=$(get_home_dir $i) VALKEY="val${i}" P2P_PORT=$(get_p2p_port $i) @@ -279,7 +319,7 @@ generate_genesis() { echo "" echo ">>> Step 7: Collecting all gentxs..." GENTX_DIR="$(get_home_dir 0)/config/gentx" - for i in 1 2; do + for i in $(seq 1 $((VALIDATOR_COUNT - 1))); do cp "$(get_home_dir $i)/config/gentx/"*.json "$GENTX_DIR/" echo "Copied gentx from val$i" done @@ -291,22 +331,23 @@ generate_genesis() { echo "" echo ">>> Step 8: Distributing final genesis to all validators..." FINAL_GENESIS="$(get_home_dir 0)/config/genesis.json" - for i in 1 2; do + for i in $(seq 1 $((VALIDATOR_COUNT - 1))); do cp "$FINAL_GENESIS" "$(get_home_dir $i)/config/genesis.json" echo "Copied final genesis to val$i" done echo "" echo ">>> Step 9: Applying config customizations and setting peers..." 
- for i in 0 1 2; do + for i in $(seq 0 $((VALIDATOR_COUNT - 1))); do HOME_DIR=$(get_home_dir $i) apply_config_customizations "$HOME_DIR" "$i" set_persistent_peers "$HOME_DIR" "$i" "${NODE_IDS[@]}" done - echo "NODE0_ID=${NODE_IDS[0]}" > "$BASEDIR/node_ids.txt" - echo "NODE1_ID=${NODE_IDS[1]}" >> "$BASEDIR/node_ids.txt" - echo "NODE2_ID=${NODE_IDS[2]}" >> "$BASEDIR/node_ids.txt" + : > "$BASEDIR/node_ids.txt" + for i in $(seq 0 $((VALIDATOR_COUNT - 1))); do + echo "NODE${i}_ID=${NODE_IDS[$i]}" >> "$BASEDIR/node_ids.txt" + done echo "" echo "==========================================" @@ -317,14 +358,19 @@ generate_genesis() { echo " $BASEDIR/" echo " ├── val0/ (Validator 0 home)" echo " ├── val1/ (Validator 1 home)" - echo " ├── val2/ (Validator 2 home)" - echo " ├── dev_accounts.txt (10 funded dev accounts)" + if (( VALIDATOR_COUNT >= 3 )); then + echo " ├── val2/ (Validator 2 home)" + fi + if (( VALIDATOR_COUNT > 3 )); then + echo " ├── ... (Validator 3..$((VALIDATOR_COUNT - 1)) home)" + fi + echo " ├── dev_accounts.txt (${DEV_ACCOUNT_COUNT} funded dev accounts)" echo " └── node_ids.txt" echo "" echo "Port mapping:" - echo " val0: P2P=26656, RPC=26657, gRPC=9090, JSON-RPC=8545" - echo " val1: P2P=26756, RPC=26757, gRPC=9091, JSON-RPC=8546" - echo " val2: P2P=26856, RPC=26857, gRPC=9092, JSON-RPC=8547" + for i in $(seq 0 $((VALIDATOR_COUNT - 1))); do + echo " val${i}: P2P=$(get_p2p_port "$i"), RPC=$(get_rpc_port "$i"), gRPC=$(get_grpc_port "$i"), JSON-RPC=$(get_jsonrpc_port "$i")" + done echo "" echo "Validators funded: 100000000000000000000000000ogwei each" echo "Dev accounts funded: 1000000000000000000000000ogwei each" @@ -332,14 +378,25 @@ generate_genesis() { echo "==========================================" } +start_all_validators() { + local i + mkdir -p "$BASEDIR/logs" + for i in $(seq 1 $((VALIDATOR_COUNT - 1))); do + echo "Starting validator $i in background (logs: $BASEDIR/logs/val${i}.log)" + START_VALIDATOR=true NODE_NUMBER="$i" 
VALIDATOR_COUNT="$VALIDATOR_COUNT" BASEDIR="$BASEDIR" \ + bash "$SCRIPT_PATH" >"$BASEDIR/logs/val${i}.log" 2>&1 & + done + START_VALIDATOR=true NODE_NUMBER=0 VALIDATOR_COUNT="$VALIDATOR_COUNT" BASEDIR="$BASEDIR" bash "$SCRIPT_PATH" +} + start_validator() { if [[ -z "$NODE_NUMBER" ]]; then - echo "Error: NODE_NUMBER env variable required (0, 1, or 2)" + echo "Error: NODE_NUMBER env variable required (0..$((VALIDATOR_COUNT - 1)))" exit 1 fi - if [[ ! "$NODE_NUMBER" =~ ^[0-2]$ ]]; then - echo "Error: NODE_NUMBER must be 0, 1, or 2" + if [[ ! "$NODE_NUMBER" =~ ^[0-9]+$ ]] || (( NODE_NUMBER < 0 || NODE_NUMBER >= VALIDATOR_COUNT )); then + echo "Error: NODE_NUMBER must be between 0 and $((VALIDATOR_COUNT - 1))" exit 1 fi @@ -384,7 +441,11 @@ if [[ "$START_VALIDATOR" == "true" ]]; then start_validator fi -if [[ "$GENERATE_GENESIS" != "true" && "$START_VALIDATOR" != "true" ]]; then +if [[ "$START_ALL_VALIDATORS" == "true" ]]; then + start_all_validators +fi + +if [[ "$GENERATE_GENESIS" != "true" && "$START_VALIDATOR" != "true" && "$START_ALL_VALIDATORS" != "true" ]]; then echo "No mode specified." 
echo "" echo "To generate genesis:" @@ -393,5 +454,8 @@ if [[ "$GENERATE_GENESIS" != "true" && "$START_VALIDATOR" != "true" ]]; then echo "To start a validator:" echo " START_VALIDATOR=true NODE_NUMBER=0 $0" echo "" + echo "To start all validators:" + echo " START_ALL_VALIDATORS=true $0" + echo "" usage fi diff --git a/precompiles/staking/README.md b/precompiles/staking/README.md index 9dd4c7d3..3c92cc8f 100644 --- a/precompiles/staking/README.md +++ b/precompiles/staking/README.md @@ -90,6 +90,13 @@ function delegate( uint256 amount ) external returns (bool success); +// Delegate across bonded validators with equal split +function delegateToBondedValidators( + address delegatorAddress, + uint256 amount, + uint32 maxValidators +) external returns (uint256 delegatedAmount, uint32 validatorsUsed); + // Undelegate tokens from a validator function undelegate( address delegatorAddress, @@ -97,6 +104,13 @@ function undelegate( uint256 amount ) external returns (int64 completionTime); +// Undelegate across bonded validators using deterministic largest-first selection +function undelegateFromBondedValidators( + address delegatorAddress, + uint256 amount, + uint32 maxValidators +) external returns (uint256 undelegatedAmount, uint32 validatorsUsed, int64 maturityTime); + // Redelegate tokens between validators function redelegate( address delegatorAddress, @@ -173,10 +187,27 @@ The precompile uses standard gas configuration for storage operations. 
### Delegation Operations - **Delegate**: Stakes tokens with a validator, receiving shares in return +- **Delegate to Bonded Validators**: Delegates one amount across up to `maxValidators` bonded validators in precompile order - **Undelegate**: Initiates unbonding process (subject to unbonding period) +- **Undelegate from Bonded Validators**: Undelegates one amount across up to `maxValidators` bonded validators using deterministic largest-first selection - **Redelegate**: Moves stake between validators without unbonding period - **Cancel Unbonding**: Reverses an unbonding delegation before completion +### `delegateToBondedValidators` Policy + +- **Cap/order**: Uses the first `maxValidators` entries returned by bonded validator query order. +- **Split/remainder**: Uses integer split `amount / n`; remainder `amount % n` is distributed as `+1` to first validators deterministically. +- **Return shape**: Returns `(delegatedAmount, validatorsUsed)`. +- **Atomicity**: If any internal delegate operation fails, the whole transaction reverts and no partial staking state is persisted. + +### `undelegateFromBondedValidators` Policy + +- **Selection**: Considers bonded delegations only, then sorts by delegation amount descending and validator address ascending. +- **Cap/order**: Processes candidates in that deterministic order up to `maxValidators`. +- **Return shape**: Returns `(undelegatedAmount, validatorsUsed, maturityTime)` where `maturityTime` is the max completion time across internal undelegations. +- **Exactness**: Requires exact fulfillment of requested amount; otherwise transaction reverts. +- **Atomicity**: If any internal undelegate operation fails, the whole transaction reverts and no partial undelegation state is persisted. 
+ ### Address Formats - **Validator addresses**: Can be either Ethereum hex or Cosmos bech32 format diff --git a/precompiles/staking/StakingI.sol b/precompiles/staking/StakingI.sol index 3c55d0d9..69feea94 100644 --- a/precompiles/staking/StakingI.sol +++ b/precompiles/staking/StakingI.sol @@ -174,6 +174,18 @@ interface StakingI { uint256 amount ) external returns (bool success); + /// @dev Defines a method for delegating a total amount across bonded validators. + /// @param delegatorAddress The address of the delegator. + /// @param amount The total amount of bond denomination to delegate. + /// @param maxValidators Max bonded validators to include (first N in precompile order). + /// @return delegatedAmount The total amount actually delegated. + /// @return validatorsUsed Number of validators used for the delegation. + function delegateToBondedValidators( + address delegatorAddress, + uint256 amount, + uint32 maxValidators + ) external returns (uint256 delegatedAmount, uint32 validatorsUsed); + /// @dev Defines a method for performing an undelegation from a delegate and a validator. /// @param delegatorAddress The address of the delegator /// @param validatorAddress The address of the validator @@ -186,6 +198,19 @@ interface StakingI { uint256 amount ) external returns (int64 completionTime); + /// @dev Defines a method for undelegating a total amount across bonded validators. + /// @param delegatorAddress The address of the delegator. + /// @param amount The total amount of bond denomination to undelegate. + /// @param maxValidators Max bonded validators to undelegate from. + /// @return undelegatedAmount The total amount actually undelegated. + /// @return validatorsUsed Number of validators used for the undelegation. + /// @return maturityTime The maximum completion time across internal undelegations. 
+ function undelegateFromBondedValidators( + address delegatorAddress, + uint256 amount, + uint32 maxValidators + ) external returns (uint256 undelegatedAmount, uint32 validatorsUsed, int64 maturityTime); + /// @dev Defines a method for performing a redelegation /// of coins from a delegator and source validator to a destination validator. /// @param delegatorAddress The address of the delegator diff --git a/precompiles/staking/abi.json b/precompiles/staking/abi.json index 7871943d..43ecfb9d 100644 --- a/precompiles/staking/abi.json +++ b/precompiles/staking/abi.json @@ -324,6 +324,40 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "address", + "name": "delegatorAddress", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint32", + "name": "maxValidators", + "type": "uint32" + } + ], + "name": "delegateToBondedValidators", + "outputs": [ + { + "internalType": "uint256", + "name": "delegatedAmount", + "type": "uint256" + }, + { + "internalType": "uint32", + "name": "validatorsUsed", + "type": "uint32" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { @@ -803,6 +837,45 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "address", + "name": "delegatorAddress", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint32", + "name": "maxValidators", + "type": "uint32" + } + ], + "name": "undelegateFromBondedValidators", + "outputs": [ + { + "internalType": "uint256", + "name": "undelegatedAmount", + "type": "uint256" + }, + { + "internalType": "uint32", + "name": "validatorsUsed", + "type": "uint32" + }, + { + "internalType": "int64", + "name": "maturityTime", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { diff --git 
a/precompiles/staking/staking.go b/precompiles/staking/staking.go index 624fafab..f929d23a 100644 --- a/precompiles/staking/staking.go +++ b/precompiles/staking/staking.go @@ -114,8 +114,12 @@ func (p Precompile) Execute(ctx sdk.Context, stateDB vm.StateDB, contract *vm.Co bz, err = p.EditValidator(ctx, contract, stateDB, method, args) case DelegateMethod: bz, err = p.Delegate(ctx, contract, stateDB, method, args) + case DelegateToBondedValidatorsMethod: + bz, err = p.DelegateToBondedValidators(ctx, contract, stateDB, method, args) case UndelegateMethod: bz, err = p.Undelegate(ctx, contract, stateDB, method, args) + case UndelegateFromBondedValidatorsMethod: + bz, err = p.UndelegateFromBondedValidators(ctx, contract, stateDB, method, args) case RedelegateMethod: bz, err = p.Redelegate(ctx, contract, stateDB, method, args) case CancelUnbondingDelegationMethod: @@ -146,7 +150,9 @@ func (p Precompile) Execute(ctx sdk.Context, stateDB vm.StateDB, contract *vm.Co // - CreateValidator // - EditValidator // - Delegate +// - DelegateToBondedValidators // - Undelegate +// - UndelegateFromBondedValidators // - Redelegate // - CancelUnbondingDelegation func (Precompile) IsTransaction(method *abi.Method) bool { @@ -154,7 +160,9 @@ func (Precompile) IsTransaction(method *abi.Method) bool { case CreateValidatorMethod, EditValidatorMethod, DelegateMethod, + DelegateToBondedValidatorsMethod, UndelegateMethod, + UndelegateFromBondedValidatorsMethod, RedelegateMethod, CancelUnbondingDelegationMethod: return true diff --git a/precompiles/staking/tx.go b/precompiles/staking/tx.go index 6ab7996e..e9f925d7 100644 --- a/precompiles/staking/tx.go +++ b/precompiles/staking/tx.go @@ -3,6 +3,8 @@ package staking import ( "errors" "fmt" + "math/big" + "sort" "github.com/ethereum/go-ethereum/accounts/abi" ethtypes "github.com/ethereum/go-ethereum/core/types" @@ -10,7 +12,11 @@ import ( cmn "github.com/cosmos/evm/precompiles/common" + "cosmossdk.io/math" + sdk 
"github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" ) const ( @@ -21,9 +27,15 @@ const ( // DelegateMethod defines the ABI method name for the staking Delegate // transaction. DelegateMethod = "delegate" + // DelegateToBondedValidatorsMethod defines the ABI method name for delegating + // equally across the bonded validator set in a single transaction. + DelegateToBondedValidatorsMethod = "delegateToBondedValidators" // UndelegateMethod defines the ABI method name for the staking Undelegate // transaction. UndelegateMethod = "undelegate" + // UndelegateFromBondedValidatorsMethod defines the ABI method name for + // undelegating across bonded validators in a single transaction. + UndelegateFromBondedValidatorsMethod = "undelegateFromBondedValidators" // RedelegateMethod defines the ABI method name for the staking Redelegate // transaction. RedelegateMethod = "redelegate" @@ -185,6 +197,259 @@ func (p *Precompile) Delegate( return method.Outputs.Pack(true) } +// DelegateToBondedValidators delegates equally across bonded validators. +// The validator set and ordering come from the staking query layer. Callers that need a different +// target policy should rebalance afterward rather than treating this remainder order as canonical. 
+func (p *Precompile) DelegateToBondedValidators( + ctx sdk.Context, + contract *vm.Contract, + stateDB vm.StateDB, + method *abi.Method, + args []interface{}, +) ([]byte, error) { + input, err := NewDelegateToBondedValidatorsArgs(args) + if err != nil { + return nil, err + } + + msgSender := contract.Caller() + if msgSender != input.DelegatorAddress { + return nil, fmt.Errorf(cmn.ErrRequesterIsNotMsgSender, msgSender.String(), input.DelegatorAddress.String()) + } + + bondDenom, err := p.stakingKeeper.BondDenom(ctx) + if err != nil { + return nil, err + } + + res, err := p.stakingQuerier.Validators(ctx, &stakingtypes.QueryValidatorsRequest{ + Status: stakingtypes.BondStatusBonded, + Pagination: &query.PageRequest{ + Limit: uint64(input.MaxValidators), + }, + }) + if err != nil { + return nil, err + } + if len(res.Validators) == 0 { + return nil, errors.New("no bonded validators found") + } + + delegatorAddrStr, err := p.addrCdc.BytesToString(input.DelegatorAddress.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode delegator address: %w", err) + } + + validatorCount := uint32(len(res.Validators)) + baseAmount := new(big.Int).Div(input.Amount, big.NewInt(int64(validatorCount))) + remainder := new(big.Int).Mod(input.Amount, big.NewInt(int64(validatorCount))).Uint64() + + totalDelegated := big.NewInt(0) + validatorsUsed := uint32(0) + for i := uint32(0); i < validatorCount; i++ { + perValidator := new(big.Int).Set(baseAmount) + if uint64(i) < remainder { + perValidator = perValidator.Add(perValidator, big.NewInt(1)) + } + // Skip zero-amount delegates (e.g. amount < validatorCount). 
+ if perValidator.Sign() == 0 { + continue + } + + msg := &stakingtypes.MsgDelegate{ + DelegatorAddress: delegatorAddrStr, + ValidatorAddress: res.Validators[i].OperatorAddress, + Amount: sdk.Coin{ + Denom: bondDenom, + Amount: math.NewIntFromBigInt(perValidator), + }, + } + + if _, err = p.stakingMsgServer.Delegate(ctx, msg); err != nil { + return nil, err + } + if err = p.EmitDelegateEvent(ctx, stateDB, msg, input.DelegatorAddress); err != nil { + return nil, err + } + + totalDelegated.Add(totalDelegated, perValidator) + validatorsUsed++ + } + + p.Logger(ctx).Debug( + "tx called", + "method", method.Name, + "args", fmt.Sprintf( + "{ delegator_address: %s, amount: %s, max_validators: %d, delegated_amount: %s, validators_used: %d }", + input.DelegatorAddress, + input.Amount, + input.MaxValidators, + totalDelegated, + validatorsUsed, + ), + ) + + return method.Outputs.Pack(totalDelegated, validatorsUsed) +} + +// UndelegateFromBondedValidators undelegates across bonded validators. +// Selection policy is deterministic: largest delegation first, tie-broken by +// validator address ascending. 
+func (p *Precompile) UndelegateFromBondedValidators( + ctx sdk.Context, + contract *vm.Contract, + stateDB vm.StateDB, + method *abi.Method, + args []interface{}, +) ([]byte, error) { + input, err := NewUndelegateFromBondedValidatorsArgs(args) + if err != nil { + return nil, err + } + + msgSender := contract.Caller() + if msgSender != input.DelegatorAddress { + return nil, fmt.Errorf(cmn.ErrRequesterIsNotMsgSender, msgSender.String(), input.DelegatorAddress.String()) + } + + bondDenom, err := p.stakingKeeper.BondDenom(ctx) + if err != nil { + return nil, err + } + + delegatorAddrStr, err := p.addrCdc.BytesToString(input.DelegatorAddress.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode delegator address: %w", err) + } + + type candidateUndelegation struct { + validatorAddress string + amount *big.Int + } + + candidates := make([]candidateUndelegation, 0) + var nextKey []byte + for { + delegationsRes, err := p.stakingQuerier.DelegatorDelegations(ctx, &stakingtypes.QueryDelegatorDelegationsRequest{ + DelegatorAddr: delegatorAddrStr, + Pagination: &query.PageRequest{ + Key: nextKey, + Limit: 200, + }, + }) + if err != nil { + return nil, err + } + + for _, delResp := range delegationsRes.DelegationResponses { + amount := delResp.Balance.Amount.BigInt() + if amount.Sign() <= 0 { + continue + } + + validatorRes, err := p.stakingQuerier.Validator(ctx, &stakingtypes.QueryValidatorRequest{ + ValidatorAddr: delResp.Delegation.ValidatorAddress, + }) + if err != nil { + return nil, err + } + if validatorRes.Validator.Status != stakingtypes.Bonded { + continue + } + + candidates = append(candidates, candidateUndelegation{ + validatorAddress: delResp.Delegation.ValidatorAddress, + amount: amount, + }) + } + if delegationsRes.Pagination == nil || len(delegationsRes.Pagination.NextKey) == 0 { + break + } + nextKey = delegationsRes.Pagination.NextKey + } + + if len(candidates) == 0 { + return nil, errors.New("no bonded delegations found") + } + + 
sort.Slice(candidates, func(i, j int) bool { + cmp := candidates[i].amount.Cmp(candidates[j].amount) + if cmp != 0 { + return cmp > 0 + } + return candidates[i].validatorAddress < candidates[j].validatorAddress + }) + + remaining := new(big.Int).Set(input.Amount) + undelegatedAmount := big.NewInt(0) + validatorsUsed := uint32(0) + var maturityTime int64 + + for _, candidate := range candidates { + if remaining.Sign() == 0 || validatorsUsed >= input.MaxValidators { + break + } + + stepAmount := new(big.Int).Set(remaining) + if candidate.amount.Cmp(stepAmount) < 0 { + stepAmount = new(big.Int).Set(candidate.amount) + } + if stepAmount.Sign() == 0 { + continue + } + + msg := &stakingtypes.MsgUndelegate{ + DelegatorAddress: delegatorAddrStr, + ValidatorAddress: candidate.validatorAddress, + Amount: sdk.Coin{ + Denom: bondDenom, + Amount: math.NewIntFromBigInt(stepAmount), + }, + } + + res, err := p.stakingMsgServer.Undelegate(ctx, msg) + if err != nil { + return nil, err + } + + completion := res.CompletionTime.UTC().Unix() + if completion > maturityTime { + maturityTime = completion + } + if err = p.EmitUnbondEvent(ctx, stateDB, msg, input.DelegatorAddress, completion); err != nil { + return nil, err + } + + undelegatedAmount.Add(undelegatedAmount, stepAmount) + remaining.Sub(remaining, stepAmount) + validatorsUsed++ + } + + if remaining.Sign() > 0 { + return nil, fmt.Errorf( + "insufficient bonded delegations to undelegate requested amount: requested=%s undelegated=%s", + input.Amount.String(), + undelegatedAmount.String(), + ) + } + + p.Logger(ctx).Debug( + "tx called", + "method", method.Name, + "args", fmt.Sprintf( + "{ delegator_address: %s, amount: %s, max_validators: %d, undelegated_amount: %s, validators_used: %d, maturity_time: %d }", + input.DelegatorAddress, + input.Amount, + input.MaxValidators, + undelegatedAmount, + validatorsUsed, + maturityTime, + ), + ) + + return method.Outputs.Pack(undelegatedAmount, validatorsUsed, maturityTime) +} + // Undelegate 
performs the undelegation of coins from a validator for a delegate. // The provided amount cannot be negative. This is validated in the msg.ValidateBasic() function. func (p Precompile) Undelegate( diff --git a/precompiles/staking/types.go b/precompiles/staking/types.go index 7c8baae5..e1cc284d 100644 --- a/precompiles/staking/types.go +++ b/precompiles/staking/types.go @@ -76,6 +76,22 @@ type EventCancelUnbonding struct { CreationHeight *big.Int } +// DelegateToBondedValidatorsArgs is the parsed input for delegating across +// bonded validators. +type DelegateToBondedValidatorsArgs struct { + DelegatorAddress common.Address + Amount *big.Int + MaxValidators uint32 +} + +// UndelegateFromBondedValidatorsArgs is the parsed input for undelegating +// across bonded validators. +type UndelegateFromBondedValidatorsArgs struct { + DelegatorAddress common.Address + Amount *big.Int + MaxValidators uint32 +} + // Description defines a validator description. type Description = struct { Moniker string `json:"moniker"` @@ -376,6 +392,76 @@ func NewMsgCancelUnbondingDelegation(args []interface{}, denom string, addrCdc a return msg, delegatorAddr, nil } +// NewDelegateToBondedValidatorsArgs validates and parses arguments for the +// delegateToBondedValidators transaction. 
+func NewDelegateToBondedValidatorsArgs(args []interface{}) (*DelegateToBondedValidatorsArgs, error) { + if len(args) != 3 { + return nil, fmt.Errorf(cmn.ErrInvalidNumberOfArgs, 3, len(args)) + } + + delegatorAddr, ok := args[0].(common.Address) + if !ok || delegatorAddr == (common.Address{}) { + return nil, fmt.Errorf(cmn.ErrInvalidDelegator, args[0]) + } + + amount, ok := args[1].(*big.Int) + if !ok { + return nil, fmt.Errorf(cmn.ErrInvalidAmount, args[1]) + } + if amount.Sign() <= 0 { + return nil, errors.New("amount must be greater than zero") + } + + maxValidators, ok := args[2].(uint32) + if !ok { + return nil, fmt.Errorf(cmn.ErrInvalidType, "maxValidators", "uint32", args[2]) + } + if maxValidators == 0 { + return nil, errors.New("maxValidators must be greater than zero") + } + + return &DelegateToBondedValidatorsArgs{ + DelegatorAddress: delegatorAddr, + Amount: amount, + MaxValidators: maxValidators, + }, nil +} + +// NewUndelegateFromBondedValidatorsArgs validates and parses arguments for the +// undelegateFromBondedValidators transaction. 
+func NewUndelegateFromBondedValidatorsArgs(args []interface{}) (*UndelegateFromBondedValidatorsArgs, error) { + if len(args) != 3 { + return nil, fmt.Errorf(cmn.ErrInvalidNumberOfArgs, 3, len(args)) + } + + delegatorAddr, ok := args[0].(common.Address) + if !ok || delegatorAddr == (common.Address{}) { + return nil, fmt.Errorf(cmn.ErrInvalidDelegator, args[0]) + } + + amount, ok := args[1].(*big.Int) + if !ok { + return nil, fmt.Errorf(cmn.ErrInvalidAmount, args[1]) + } + if amount.Sign() <= 0 { + return nil, errors.New("amount must be greater than zero") + } + + maxValidators, ok := args[2].(uint32) + if !ok { + return nil, fmt.Errorf(cmn.ErrInvalidType, "maxValidators", "uint32", args[2]) + } + if maxValidators == 0 { + return nil, errors.New("maxValidators must be greater than zero") + } + + return &UndelegateFromBondedValidatorsArgs{ + DelegatorAddress: delegatorAddr, + Amount: amount, + MaxValidators: maxValidators, + }, nil +} + // NewDelegationRequest creates a new QueryDelegationRequest instance and does sanity checks // on the given arguments before populating the request. 
func NewDelegationRequest(args []interface{}, addrCdc address.Codec) (*stakingtypes.QueryDelegationRequest, error) { diff --git a/precompiles/staking/types_test.go b/precompiles/staking/types_test.go index 8a7d609c..98920eab 100644 --- a/precompiles/staking/types_test.go +++ b/precompiles/staking/types_test.go @@ -645,3 +645,197 @@ func TestNewUnbondingDelegationRequest(t *testing.T) { }) } } + +func TestNewDelegateToBondedValidatorsArgs(t *testing.T) { + delegatorAddr := common.HexToAddress("0x1234567890123456789012345678901234567890") + amount := big.NewInt(1000000000) + maxValidators := uint32(8) + + tests := []struct { + name string + args []interface{} + wantErr bool + errMsg string + wantDelegatorAddr common.Address + wantAmount *big.Int + wantMaxValidators uint32 + }{ + { + name: "valid", + args: []interface{}{delegatorAddr, amount, maxValidators}, + wantErr: false, + wantDelegatorAddr: delegatorAddr, + wantAmount: amount, + wantMaxValidators: maxValidators, + }, + { + name: "no arguments", + args: []interface{}{}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidNumberOfArgs, 3, 0), + }, + { + name: "too many arguments", + args: []interface{}{delegatorAddr, amount, maxValidators, "extra"}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidNumberOfArgs, 3, 4), + }, + { + name: "invalid delegator type", + args: []interface{}{"not-an-address", amount, maxValidators}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidDelegator, "not-an-address"), + }, + { + name: "empty delegator address", + args: []interface{}{common.Address{}, amount, maxValidators}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidDelegator, common.Address{}), + }, + { + name: "invalid amount type", + args: []interface{}{delegatorAddr, "not-a-big-int", maxValidators}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidAmount, "not-a-big-int"), + }, + { + name: "zero amount", + args: []interface{}{delegatorAddr, big.NewInt(0), maxValidators}, + wantErr: true, + errMsg: "amount 
must be greater than zero", + }, + { + name: "negative amount", + args: []interface{}{delegatorAddr, big.NewInt(-1), maxValidators}, + wantErr: true, + errMsg: "amount must be greater than zero", + }, + { + name: "invalid max validators type", + args: []interface{}{delegatorAddr, amount, "not-uint32"}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidType, "maxValidators", "uint32", "not-uint32"), + }, + { + name: "zero max validators", + args: []interface{}{delegatorAddr, amount, uint32(0)}, + wantErr: true, + errMsg: "maxValidators must be greater than zero", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parsed, err := NewDelegateToBondedValidatorsArgs(tt.args) + + if tt.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), tt.errMsg) + require.Nil(t, parsed) + } else { + require.NoError(t, err) + require.NotNil(t, parsed) + require.Equal(t, tt.wantDelegatorAddr, parsed.DelegatorAddress) + require.Equal(t, tt.wantAmount, parsed.Amount) + require.Equal(t, tt.wantMaxValidators, parsed.MaxValidators) + } + }) + } +} + +func TestNewUndelegateFromBondedValidatorsArgs(t *testing.T) { + delegatorAddr := common.HexToAddress("0x1234567890123456789012345678901234567890") + amount := big.NewInt(1000000000) + maxValidators := uint32(8) + + tests := []struct { + name string + args []interface{} + wantErr bool + errMsg string + wantDelegatorAddr common.Address + wantAmount *big.Int + wantMaxValidators uint32 + }{ + { + name: "valid", + args: []interface{}{delegatorAddr, amount, maxValidators}, + wantErr: false, + wantDelegatorAddr: delegatorAddr, + wantAmount: amount, + wantMaxValidators: maxValidators, + }, + { + name: "no arguments", + args: []interface{}{}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidNumberOfArgs, 3, 0), + }, + { + name: "too many arguments", + args: []interface{}{delegatorAddr, amount, maxValidators, "extra"}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidNumberOfArgs, 3, 4), + }, + { + 
name: "invalid delegator type", + args: []interface{}{"not-an-address", amount, maxValidators}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidDelegator, "not-an-address"), + }, + { + name: "empty delegator address", + args: []interface{}{common.Address{}, amount, maxValidators}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidDelegator, common.Address{}), + }, + { + name: "invalid amount type", + args: []interface{}{delegatorAddr, "not-a-big-int", maxValidators}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidAmount, "not-a-big-int"), + }, + { + name: "zero amount", + args: []interface{}{delegatorAddr, big.NewInt(0), maxValidators}, + wantErr: true, + errMsg: "amount must be greater than zero", + }, + { + name: "negative amount", + args: []interface{}{delegatorAddr, big.NewInt(-1), maxValidators}, + wantErr: true, + errMsg: "amount must be greater than zero", + }, + { + name: "invalid max validators type", + args: []interface{}{delegatorAddr, amount, "not-uint32"}, + wantErr: true, + errMsg: fmt.Sprintf(cmn.ErrInvalidType, "maxValidators", "uint32", "not-uint32"), + }, + { + name: "zero max validators", + args: []interface{}{delegatorAddr, amount, uint32(0)}, + wantErr: true, + errMsg: "maxValidators must be greater than zero", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parsed, err := NewUndelegateFromBondedValidatorsArgs(tt.args) + + if tt.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), tt.errMsg) + require.Nil(t, parsed) + } else { + require.NoError(t, err) + require.NotNil(t, parsed) + require.Equal(t, tt.wantDelegatorAddr, parsed.DelegatorAddress) + require.Equal(t, tt.wantAmount, parsed.Amount) + require.Equal(t, tt.wantMaxValidators, parsed.MaxValidators) + } + }) + } +} diff --git a/proto/cosmos/poolrebalancer/v1/poolrebalancer.proto b/proto/cosmos/poolrebalancer/v1/poolrebalancer.proto new file mode 100644 index 00000000..aae7df78 --- /dev/null +++ 
b/proto/cosmos/poolrebalancer/v1/poolrebalancer.proto
@@ -0,0 +1,58 @@
+syntax = "proto3";
+package cosmos.poolrebalancer.v1;
+
+import "cosmos/base/v1beta1/coin.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/cosmos/evm/x/poolrebalancer/types";
+option (gogoproto.goproto_getters_all) = false;
+
+// Params defines the parameters for the poolrebalancer module.
+message Params {
+  // pool_delegator_address is the account whose stake is rebalanced.
+  string pool_delegator_address = 1;
+
+  // max_target_validators caps the bonded validator set size (top N by power).
+  uint32 max_target_validators = 2;
+
+  // rebalance_threshold_bp is the drift threshold in basis points.
+  uint32 rebalance_threshold_bp = 3;
+
+  // max_ops_per_block caps redelegation operations per block.
+  uint32 max_ops_per_block = 4;
+
+  // max_move_per_op caps the amount moved per operation (0 = no cap).
+  string max_move_per_op = 5 [
+    (gogoproto.customtype) = "cosmossdk.io/math.Int",
+    (gogoproto.nullable) = false
+  ];
+
+  reserved 6;
+}
+
+// PendingRedelegation is an in-flight redelegation tracked for transitive redelegation safety.
+message PendingRedelegation {
+  // delegator_address is the delegator whose redelegation is in flight.
+  string delegator_address = 1;
+  // src_validator_address is the source validator of the redelegation.
+  string src_validator_address = 2;
+  // dst_validator_address is the destination validator of the redelegation.
+  string dst_validator_address = 3;
+  // amount is the coin amount being redelegated.
+  cosmos.base.v1beta1.Coin amount = 4 [ (gogoproto.nullable) = false ];
+  // completion_time is the time at which the redelegation completes.
+  google.protobuf.Timestamp completion_time = 5
+      [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ];
+}
+
+// QueuedRedelegation groups redelegations that share the same completion time.
+message QueuedRedelegation {
+  repeated PendingRedelegation entries = 1 [ (gogoproto.nullable) = false ];
+}
+
+// GenesisState defines the poolrebalancer module's genesis state.
+message GenesisState {
+  Params params = 1 [ (gogoproto.nullable) = false ];
+
+  // pending_redelegations allow restoring in-flight state on restart.
+  // They are optional for initial deployments.
+  repeated PendingRedelegation pending_redelegations = 2
+      [ (gogoproto.nullable) = false ];
+  reserved 3;
+}
diff --git a/proto/cosmos/poolrebalancer/v1/query.proto b/proto/cosmos/poolrebalancer/v1/query.proto
new file mode 100644
index 00000000..b43dd671
--- /dev/null
+++ b/proto/cosmos/poolrebalancer/v1/query.proto
@@ -0,0 +1,49 @@
+syntax = "proto3";
+package cosmos.poolrebalancer.v1;
+
+import "amino/amino.proto";
+import "cosmos/base/query/v1beta1/pagination.proto";
+import "cosmos/poolrebalancer/v1/poolrebalancer.proto";
+import "gogoproto/gogo.proto";
+import "google/api/annotations.proto";
+
+option go_package = "github.com/cosmos/evm/x/poolrebalancer/types";
+option (gogoproto.goproto_getters_all) = false;
+
+// Query defines the gRPC querier service.
+service Query {
+  // Params returns the poolrebalancer module params.
+  rpc Params(QueryParamsRequest) returns (QueryParamsResponse) {
+    option (google.api.http).get = "/cosmos/poolrebalancer/v1/params";
+  }
+
+  // PendingRedelegations returns tracked in-flight redelegations.
+  rpc PendingRedelegations(QueryPendingRedelegationsRequest)
+      returns (QueryPendingRedelegationsResponse) {
+    option (google.api.http).get =
+        "/cosmos/poolrebalancer/v1/pending_redelegations";
+  }
+
+}
+
+// QueryParamsRequest is the request type for the Query/Params RPC method.
+message QueryParamsRequest {}
+
+// QueryParamsResponse is the response type for the Query/Params RPC method.
+message QueryParamsResponse {
+  Params params = 1
+      [ (gogoproto.nullable) = false, (amino.dont_omitempty) = true ];
+}
+
+// QueryPendingRedelegationsRequest is the request type for the Query/PendingRedelegations RPC method.
+message QueryPendingRedelegationsRequest {
+  // pagination defines an optional pagination for the request.
+  cosmos.base.query.v1beta1.PageRequest pagination = 1;
+}
+
+// QueryPendingRedelegationsResponse is the response type for the Query/PendingRedelegations RPC method.
+message QueryPendingRedelegationsResponse {
+  // redelegations is the page of tracked in-flight redelegations.
+  repeated PendingRedelegation redelegations = 1
+      [ (gogoproto.nullable) = false, (amino.dont_omitempty) = true ];
+  // pagination defines the pagination in the response.
+  cosmos.base.query.v1beta1.PageResponse pagination = 2;
+}
diff --git a/proto/cosmos/poolrebalancer/v1/tx.proto b/proto/cosmos/poolrebalancer/v1/tx.proto
new file mode 100644
index 00000000..825cd653
--- /dev/null
+++ b/proto/cosmos/poolrebalancer/v1/tx.proto
@@ -0,0 +1,36 @@
+syntax = "proto3";
+package cosmos.poolrebalancer.v1;
+
+import "amino/amino.proto";
+import "cosmos/poolrebalancer/v1/poolrebalancer.proto";
+import "cosmos/msg/v1/msg.proto";
+import "cosmos_proto/cosmos.proto";
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/cosmos/evm/x/poolrebalancer/types";
+
+// Msg defines the poolrebalancer Msg service.
+service Msg {
+  option (cosmos.msg.v1.service) = true;
+
+  // UpdateParams is a governance operation for updating the x/poolrebalancer module parameters.
+  // The authority is the Cosmos SDK x/gov module account.
+  rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse);
+}
+
+// MsgUpdateParams defines a Msg for updating the x/poolrebalancer module parameters.
+message MsgUpdateParams {
+  option (cosmos.msg.v1.signer) = "authority";
+  option (amino.name) = "cosmos/poolrebalancer/v1/MsgUpdateParams";
+
+  // authority is the address of the governance account.
+  string authority = 1 [ (cosmos_proto.scalar) = "cosmos.AddressString" ];
+
+  // params defines the x/poolrebalancer parameters to update.
+  // NOTE: All parameters must be supplied.
+  Params params = 2
+      [ (gogoproto.nullable) = false, (amino.dont_omitempty) = true ];
+}
+
+// MsgUpdateParamsResponse defines the response structure for executing a MsgUpdateParams message.
+message MsgUpdateParamsResponse {} diff --git a/tests/e2e/poolrebalancer/README.md b/tests/e2e/poolrebalancer/README.md new file mode 100644 index 00000000..f4591472 --- /dev/null +++ b/tests/e2e/poolrebalancer/README.md @@ -0,0 +1,102 @@ +# Poolrebalancer E2E Scripts + +Manual E2E scripts for `x/poolrebalancer` and CommunityPool contract flows. + +## Scripts + +- `tests/e2e/poolrebalancer/rebalance_scenario_runner.sh`: boots local devnet, deploys/wires CommunityPool, seeds rebalance scenarios, and monitors pending redelegations. +- `tests/e2e/poolrebalancer/user_flow_multikey.sh`: CommunityPool multi-account journey (approve, deposit, withdraw, claimWithdraw, optional claimRewards). +- `tests/e2e/poolrebalancer/community_pool_edge_cases.sh`: phase-driven pass/fail checks for ACL, reconcile, withdraw sizing, liquidity/maturity, dust, and rewards. +- `tests/e2e/poolrebalancer/lib/pool_e2e_common.sh`: shared helpers (RPC readiness, cast send wrappers, uint parsing, snapshots, account key lookup). 
+ +## Quick Start + +```bash +bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh --help +``` + +```bash +bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh --scenario happy_path --nodes 3 --profile medium +``` + +Print the full command reference at any time: + +```bash +bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh help +``` + +Watch mode for an already-running chain: + +```bash +bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh watch +``` + +Run user flow on an already-running and wired chain: + +```bash +bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh user_flow_multikey +``` + +Run edge-case phases (default phase is `auth` if none provided): + +```bash +bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh community_pool_edge_cases +bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh community_pool_edge_cases all +bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh community_pool_edge_cases auth,drift,withdraw_sizing +``` + +## Supported Scenarios + +- `happy_path`: baseline scheduling from skewed delegation. +- `caps`: verifies `max_ops_per_block` and `max_move_per_op`. +- `threshold_boundary`: verifies high threshold suppresses small drift scheduling. +- `expansion`: with five validators, verifies destination expansion beyond initially delegated validators. 
+ +Aliases normalized by the runner: + +- `baseline_3val` -> `happy_path` +- `max_target_gt_bonded_3val` -> `happy_path` with higher target count +- `target_set_expansion_5val` -> `expansion` + +## Commands + +- default: full setup + seed + observation loop +- `watch`: params, pending redelegations, and pool reads +- `user_flow_multikey`: run CommunityPool multi-account flow on an already running/wired chain +- `community_pool_edge_cases [phase-spec] [options]`: run edge-case assertions on an already running/wired chain (`phase-spec` is one token such as `all` or `auth,drift,withdraw_sizing`) +- `help` / `--help` + +## Operational Notes + +- `NODE_RPC` controls CometBFT RPC (default `tcp://127.0.0.1:26657`). +- `EVM_RPC` controls `cast` RPC (default `http://127.0.0.1:8545`). +- Full run mode deploys (or reuses) CommunityPool, sets `automationCaller`, then sets `poolrebalancer.params.pool_delegator_address` through governance. +- `user_flow_multikey` and `community_pool_edge_cases` do not start validators or run governance; they wait for an already-wired chain unless `POOL_CONTRACT_ADDR` is provided. +- Scenario defaults are applied only for knobs not explicitly set in environment variables. 
+- `community_pool_edge_cases` default phase behavior: + - no positional arg + no `COMMUNITY_POOL_EDGE_PHASES`: runs `auth` + - positional phase arg: overrides `COMMUNITY_POOL_EDGE_PHASES` + - `all`: expands to `auth,drift,withdraw_sizing,liquidity,dust,rewards` +- If behavior is unexpected, inspect: + - `evmd query poolrebalancer params ...` + - `evmd query poolrebalancer pending-redelegations ...` + +## Useful CLI Options + +Common runner options: + +- `--scenario`, `--nodes`, `--profile` +- `--stress-profile` (`100users`/`stress100`) for `user_flow_multikey` +- `--user-count`, `--withdraw-users` +- `--flow-mode` (`serial`/`parallel`) +- `--deposit-concurrency`, `--withdraw-concurrency`, `--claim-concurrency`, `--claim-rewards-concurrency` +- `--batch-delay-ms` + +## Event Signals + +Common rebalance signals emitted in EndBlock: + +- `rebalance_summary` +- `redelegation_started` +- `redelegation_failed` +- `redelegations_completed` diff --git a/tests/e2e/poolrebalancer/community_pool_edge_cases.sh b/tests/e2e/poolrebalancer/community_pool_edge_cases.sh new file mode 100755 index 00000000..af92d17d --- /dev/null +++ b/tests/e2e/poolrebalancer/community_pool_edge_cases.sh @@ -0,0 +1,1373 @@ +#!/usr/bin/env bash +# CommunityPool edge-case E2E (incremental). +# Step 1: auth — non-owner reverts on privileged calls. +# Step 2: drift — owner syncTotalStaked(skew); poll until totalStaked matches staking bonded (reconcile recovery). +# Step 3: withdraw_sizing — optional poll for pendingRebalanceUnbondReserve>0; one withdraw(); assert amountOut +# formula and that pendingRebalanceUnbondReserve is unchanged (user withdraw does not debit it). +# Step 4: liquidity — create one withdraw request, assert claimWithdraw(requestId) reverts before maturity; +# optional stress loop retries matured claim to probe liquidity/settling behavior without making the +# deterministic phase flaky by default. 
+# Step 5: dust — tiny deposit/withdraw rounding reverts plus owner setConfig boundary + stake no-op checks +# with config restoration. +# Step 6: rewards — multi-harvest + claimRewards sanity and liquid reserve invariants. +# Requires: running devnet, pool wired (poolrebalancer.params.pool_delegator_address or POOL_CONTRACT_ADDR), +# BASEDIR/dev_accounts.txt; withdraw_sizing needs LP units on WITHDRAW_SIZING_ACCOUNT (default dev2). +# +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +# shellcheck source=/dev/null +source "$SCRIPT_DIR/lib/pool_e2e_common.sh" + +BASEDIR="${BASEDIR:-"$HOME/.og-evm-devnet"}" +NODE_RPC="${NODE_RPC:-tcp://127.0.0.1:26657}" +CHAIN_ID="${CHAIN_ID:-10740}" +EVM_RPC="${EVM_RPC:-http://127.0.0.1:8545}" +BOND_PRECOMPILE="${BOND_PRECOMPILE:-0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE}" +CHAIN_HOME="${CHAIN_HOME:-$BASEDIR/val0}" +POOL_CONTRACT_ADDR="${POOL_CONTRACT_ADDR:-}" +DEV_ACCOUNTS_FILE="${DEV_ACCOUNTS_FILE:-$BASEDIR/dev_accounts.txt}" + +# Phases are set by: (1) first argv to this script (auth|drift|withdraw_sizing|liquidity|all|comma list), +# (2) env COMMUNITY_POOL_EDGE_PHASES, (3) default auth only. +# Private key source: dev account that is not pool owner and not automationCaller (default dev1). +AUTH_NON_OWNER_ACCOUNT="${AUTH_NON_OWNER_ACCOUNT:-dev1}" +# Pool owner key for syncTotalStaked (default: dev0 from DEV_ACCOUNTS_FILE if unset). +POOL_OWNER_PK="${POOL_OWNER_PK:-}" +# Added to on-chain totalStaked to simulate bookkeeping drift (wei). +DRIFT_SKEW_WEI="${DRIFT_SKEW_WEI:-1000000000000000000}" +# Wall-clock timeout waiting for reconcile to restore totalStaked vs staking delegations sum. +DRIFT_RECOVER_MAX_WAIT_SECS="${DRIFT_RECOVER_MAX_WAIT_SECS:-180}" +# Optional: pool delegator bech32 override if params cannot be queried. +POOL_DEL_BECH32="${POOL_DEL_BECH32:-}" + +# Step 3: account used for withdraw sizing (script can auto-deposit if units are missing). 
+WITHDRAW_SIZING_ACCOUNT="${WITHDRAW_SIZING_ACCOUNT:-dev2}" +WITHDRAW_SIZING_FRACTION_BP="${WITHDRAW_SIZING_FRACTION_BP:-1000}" +# Poll for pendingRebalanceUnbondReserve > 0 (optional; 0 = skip poll). +WITHDRAW_SIZING_PENDING_RESERVE_POLL_SECS="${WITHDRAW_SIZING_PENDING_RESERVE_POLL_SECS:-60}" +WITHDRAW_SIZING_GAS_LIMIT="${WITHDRAW_SIZING_GAS_LIMIT:-9500000}" +# Ordered fallback BPs tried when withdraw() reverts at the primary fraction. +WITHDRAW_SIZING_CANDIDATE_BP_LIST="${WITHDRAW_SIZING_CANDIDATE_BP_LIST:-1000,500,200,100,50,20,10,5,1}" +# Auto-deposit fallback for withdraw_sizing when target account has no units / pool totals are zero. +WITHDRAW_SIZING_AUTO_DEPOSIT="${WITHDRAW_SIZING_AUTO_DEPOSIT:-1}" +WITHDRAW_SIZING_AUTO_DEPOSIT_USERS="${WITHDRAW_SIZING_AUTO_DEPOSIT_USERS:-3}" +WITHDRAW_SIZING_AUTO_DEPOSIT_AMOUNT_WEI="${WITHDRAW_SIZING_AUTO_DEPOSIT_AMOUNT_WEI:-100000000000000000000}" +WITHDRAW_SIZING_AUTO_DEPOSIT_INTERVAL_SECS="${WITHDRAW_SIZING_AUTO_DEPOSIT_INTERVAL_SECS:-1}" +WITHDRAW_SIZING_TOTAL_STAKED_WAIT_SECS="${WITHDRAW_SIZING_TOTAL_STAKED_WAIT_SECS:-120}" +# Step 4: claimWithdraw maturity revert always; optional best-effort retry at maturity. +LIQUIDITY_ACCOUNT="${LIQUIDITY_ACCOUNT:-$WITHDRAW_SIZING_ACCOUNT}" +LIQUIDITY_FRACTION_BP="${LIQUIDITY_FRACTION_BP:-$WITHDRAW_SIZING_FRACTION_BP}" +LIQUIDITY_CANDIDATE_BP_LIST="${LIQUIDITY_CANDIDATE_BP_LIST:-$WITHDRAW_SIZING_CANDIDATE_BP_LIST}" +LIQUIDITY_GAS_LIMIT="${LIQUIDITY_GAS_LIMIT:-$WITHDRAW_SIZING_GAS_LIMIT}" +LIQUIDITY_MATURITY_MAX_WAIT_SECS="${LIQUIDITY_MATURITY_MAX_WAIT_SECS:-300}" +CLAIM_STRESS_INSUFFICIENT_LIQUID="${CLAIM_STRESS_INSUFFICIENT_LIQUID:-0}" +CLAIM_STRESS_MAX_ATTEMPTS="${CLAIM_STRESS_MAX_ATTEMPTS:-20}" +CLAIM_STRESS_POLL_INTERVAL_SECS="${CLAIM_STRESS_POLL_INTERVAL_SECS:-2}" +# Step 5: dust / config. 
+DUST_ACCOUNT="${DUST_ACCOUNT:-dev1}" +DUST_SECONDARY_ACCOUNT="${DUST_SECONDARY_ACCOUNT:-dev2}" +DUST_SEED_DEPOSIT_AMOUNT_WEI="${DUST_SEED_DEPOSIT_AMOUNT_WEI:-1000000000000000000}" +DUST_BOUNDARY_MAX_VALIDATORS="${DUST_BOUNDARY_MAX_VALIDATORS:-1}" +DUST_HIGH_MIN_STAKE_AMOUNT_WEI="${DUST_HIGH_MIN_STAKE_AMOUNT_WEI:-115792089237316195423570985008687907853269984665640564039457584007913129639935}" +# Step 6: rewards / invariants. +REWARDS_ACCOUNT="${REWARDS_ACCOUNT:-dev1}" +REWARDS_HARVEST_COUNT="${REWARDS_HARVEST_COUNT:-3}" +REWARDS_HARVEST_INTERVAL_SECS="${REWARDS_HARVEST_INTERVAL_SECS:-1}" +SKIP_EMPTY_POOL_HARVEST="${SKIP_EMPTY_POOL_HARVEST:-1}" + +REQUEST_SETUP_PK="" +REQUEST_SETUP_USER_ADDR="" +REQUEST_SETUP_REQUEST_ID="" +REQUEST_SETUP_WITHDRAW_UNITS="" +REQUEST_SETUP_EXPECTED_OUT="" + +uint256_add() { + local a="${1:-0}" b="${2:-0}" + python3 -c "print(int('$a') + int('$b'))" 2>/dev/null +} + +uint256_sub_nonnegative() { + local a="${1:-0}" b="${2:-0}" + python3 -c "print(max(0, int('$a') - int('$b')))" 2>/dev/null +} + +uint256_pending_rewards_from_index() { + local units="${1:-0}" acc="${2:-0}" debt="${3:-0}" + python3 -c "print(max(0, (int('$units') * int('$acc')) // 10**18 - int('$debt')))" 2>/dev/null +} + +uint256_sub_strict() { + local a="${1:-0}" b="${2:-0}" + python3 - "$a" "$b" <<'PY' 2>/dev/null +import sys +a = int(sys.argv[1]) +b = int(sys.argv[2]) +if a < b: + sys.exit(1) +print(a - b) +PY +} + +receipt_json_effective_fee_wei() { + local json="${1:-}" + python3 - <<'PY' "$json" 2>/dev/null +import json +import sys + +raw = sys.argv[1] +obj = json.loads(raw) + +def parse_uint(v): + if v is None: + return None + if isinstance(v, int): + return v + s = str(v).strip() + if not s: + return None + if s.startswith(("0x", "0X")): + return int(s, 16) + return int(s) + +gas_used = parse_uint(obj.get("gasUsed")) +gas_price = parse_uint(obj.get("effectiveGasPrice")) +if gas_price is None: + gas_price = parse_uint(obj.get("gasPrice")) +if gas_used is None or 
gas_price is None: + sys.exit(1) +print(gas_used * gas_price) +PY +} + +log_flow_section() { + echo "" + echo "--------------------------------------------------------------------" + printf "==> %s\n" "$1" + shift || true + while [[ $# -gt 0 ]]; do + printf " * %s\n" "$1" + shift + done + echo "--------------------------------------------------------------------" +} + +# POOL_CONTRACT_ADDR wins for EVM 0x; pool delegator bech32 always from params (or POOL_DEL_BECH32 override). +resolve_pool_evm_addr() { + local params del_from_params + params="$(evmd query poolrebalancer params --node "$NODE_RPC" -o json 2>/dev/null || true)" + del_from_params="$(echo "$params" | jq -r '.params.pool_delegator_address // empty')" + if [[ -z "$POOL_DEL_BECH32" && -n "$del_from_params" ]]; then + POOL_DEL_BECH32="$del_from_params" + fi + + if [[ -n "$POOL_CONTRACT_ADDR" ]]; then + POOL_EVM_ADDR="$POOL_CONTRACT_ADDR" + log_flow_section "Pool contract (from env)" \ + "Using POOL_CONTRACT_ADDR from environment; pool delegator bech32 from params (or POOL_DEL_BECH32)." + echo " POOL_CONTRACT_ADDR=$POOL_EVM_ADDR" + if [[ -z "$POOL_DEL_BECH32" ]]; then + echo "error: pool_delegator_address empty and POOL_DEL_BECH32 unset; cannot query staking for drift" >&2 + exit 1 + fi + echo " pool_delegator_bech32 $POOL_DEL_BECH32" + return 0 + fi + + log_flow_section "Resolve CommunityPool from chain" \ + "Reading x/poolrebalancer params for pool_delegator_address, then mapping bech32 to EVM 0x for cast calls." 
+ if [[ -z "$POOL_DEL_BECH32" ]]; then + echo "error: set POOL_CONTRACT_ADDR or configure poolrebalancer.params.pool_delegator_address" >&2 + exit 1 + fi + POOL_EVM_ADDR="$(resolve_evm_hex_from_bech32 "$POOL_DEL_BECH32")" + if [[ -z "$POOL_EVM_ADDR" || "$POOL_EVM_ADDR" == "0x" ]]; then + echo "error: could not resolve EVM address for pool delegator $POOL_DEL_BECH32" >&2 + exit 1 + fi + echo " pool_delegator_bech32 $POOL_DEL_BECH32" + echo " pool_evm $POOL_EVM_ADDR" +} + +phase_enabled() { + local want="$1" + local IFS=',' + local p + for p in $COMMUNITY_POOL_EDGE_PHASES; do + [[ "$p" == "$want" ]] && return 0 + done + return 1 +} + +run_phase_auth() { + local pk + log_flow_section "Phase auth" \ + "Non-owner EOA must revert: reconcileStakedBuckets (automationCaller-only), creditStakeableFromRebalance (owner|automation), syncTotalStaked (owner-only)." \ + "Signer: AUTH_NON_OWNER_ACCOUNT=$AUTH_NON_OWNER_ACCOUNT" + pk="$(dev_account_private_key_from_file "$AUTH_NON_OWNER_ACCOUNT" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "error: missing $AUTH_NON_OWNER_ACCOUNT in $DEV_ACCOUNTS_FILE" >&2 + exit 1 + fi + + echo " -- reconcileStakedBuckets(0,0) expect revert" + cast_send_expect_revert "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" \ + "reconcileStakedBuckets(uint256,uint256)" 0 0 || exit 1 + + echo " -- creditStakeableFromRebalance(1) expect revert" + cast_send_expect_revert "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" \ + "creditStakeableFromRebalance(uint256)" 1 || exit 1 + + echo " -- syncTotalStaked(1) expect revert" + cast_send_expect_revert "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" \ + "syncTotalStaked(uint256)" 1 || exit 1 + + echo " auth phase: ok" +} + +resolve_pool_owner_pk() { + if [[ -n "${POOL_OWNER_PK:-}" ]]; then + return 0 + fi + POOL_OWNER_PK="$(dev_account_private_key_from_file "dev0" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$POOL_OWNER_PK" ]]; then + echo "error: set POOL_OWNER_PK or add dev0 to $DEV_ACCOUNTS_FILE (required for drift phase)" >&2 + exit 1 + fi 
+} + +# Optional: log principalAssets == stakeablePrincipalLedger + totalStaked + pendingRebalanceUnbondReserve +log_principal_assets_invariant() { + local pa spl ts pnd sum + pa="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "principalAssets()(uint256)")" + spl="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + ts="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + pnd="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "pendingRebalanceUnbondReserve()(uint256)")" + if ! command -v python3 >/dev/null 2>&1; then + echo " principal_assets_invariant: skip (no python3)" + return 0 + fi + sum="$(python3 -c "print(int('$spl')+int('$ts')+int('$pnd'))" 2>/dev/null || echo "")" + if [[ -n "$sum" ]] && uint256_eq "$pa" "$sum"; then + echo " principal_assets_invariant: ok (principalAssets == stakeable + totalStaked + pendingRebalance)" + else + echo "warning: principal_assets_invariant mismatch principalAssets=$pa sum_stakeable_staked_pending=$sum" >&2 + fi +} + +log_contract_debug_state() { + local label="$1" + local max_retrieve max_validators min_stake total_units total_staked stakeable principal_assets + local pending_rebalance pending_withdraw matured_withdraw price_per_unit + max_retrieve="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maxRetrieve()(uint32)")" + max_validators="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maxValidators()(uint32)")" + min_stake="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "minStakeAmount()(uint256)")" + total_units="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + stakeable="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + principal_assets="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "principalAssets()(uint256)")" + pending_rebalance="$(pool_evm_call_uint256 
"$POOL_EVM_ADDR" "$EVM_RPC" "pendingRebalanceUnbondReserve()(uint256)")" + pending_withdraw="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "pendingWithdrawReserve()(uint256)")" + matured_withdraw="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maturedWithdrawReserve()(uint256)")" + price_per_unit="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "pricePerUnit()(uint256)")" + echo " -- state[$label]" + echo " maxRetrieve=$max_retrieve maxValidators=$max_validators minStakeAmount=$min_stake" + echo " totalUnits=$total_units totalStaked=$total_staked stakeablePrincipalLedger=$stakeable" + echo " principalAssets=$principal_assets pricePerUnit=$price_per_unit" + echo " pendingRebalanceUnbondReserve=$pending_rebalance pendingWithdrawReserve=$pending_withdraw maturedWithdrawReserve=$matured_withdraw" +} + +assert_liquid_reserve_invariants() { + local label="$1" + local liquid reward_reserve matured_withdraw stakeable reserved accounted_liquid + liquid="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "liquidBalance()(uint256)")" + reward_reserve="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "rewardReserve()(uint256)")" + matured_withdraw="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maturedWithdrawReserve()(uint256)")" + stakeable="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + reserved="$(uint256_add "$reward_reserve" "$matured_withdraw")" + accounted_liquid="$(uint256_add "$stakeable" "$reserved")" + + if uint256_gt "$reward_reserve" "$liquid"; then + echo "error: reward invariant failed [$label]: rewardReserve=$reward_reserve > liquidBalance=$liquid" >&2 + return 1 + fi + if uint256_gt "$reserved" "$liquid"; then + echo "error: liquid reserve invariant failed [$label]: rewardReserve+maturedWithdrawReserve=$reserved > liquidBalance=$liquid" >&2 + return 1 + fi + if uint256_gt "$accounted_liquid" "$liquid"; then + echo "error: accounted liquid invariant failed [$label]: 
stakeable+rewardReserve+maturedWithdrawReserve=$accounted_liquid > liquidBalance=$liquid" >&2 + return 1 + fi + + echo " -- invariants[$label] ok: rewardReserve=$reward_reserve liquidBalance=$liquid reserved=$reserved accountedLiquid=$accounted_liquid" +} + +run_phase_drift() { + command -v python3 >/dev/null 2>&1 || { + echo "error: drift phase requires python3 (for uint256 skew math)" >&2 + exit 1 + } + log_flow_section "Phase drift" \ + "Owner syncTotalStaked(current + DRIFT_SKEW_WEI) simulates bookkeeping drift; reconcile should restore totalStaked to the staking bonded delegation sum." \ + "Works when bonded sum is 0: skew 0→nonzero, then expect reconcile back to 0. POOL_DEL_BECH32=$POOL_DEL_BECH32 DRIFT_SKEW_WEI=$DRIFT_SKEW_WEI DRIFT_RECOVER_MAX_WAIT_SECS=$DRIFT_RECOVER_MAX_WAIT_SECS" + + resolve_pool_owner_pk + + local expected cur wrong t0 now + expected="$(staking_delegations_bond_total_wei "$NODE_RPC" "$POOL_DEL_BECH32")" + if [[ ! "$expected" =~ ^[0-9]+$ ]]; then + expected="0" + fi + + cur="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + if [[ ! "$cur" =~ ^[0-9]+$ ]]; then + echo "error: could not read CommunityPool totalStaked()" >&2 + exit 1 + fi + + wrong="$(python3 -c "print(int('$cur') + int('$DRIFT_SKEW_WEI'))")" + echo " -- staking bonded sum (bond denom) = $expected" + echo " -- contract totalStaked before skew = $cur" + echo " (this script does not deposit; totalStaked is whatever is already on-chain for this pool — e.g. leftover" + echo " from an earlier rebalance_scenario_runner seed / user_flow deposits / stake. 
It is not DRIFT_SKEW_WEI.)" + echo " -- drift skew: add DRIFT_SKEW_WEI=$DRIFT_SKEW_WEI to that value → syncTotalStaked($wrong)" + if [[ "$expected" == "0" ]] && [[ "$cur" == "0" ]]; then + echo " -- note: bonded sum and totalStaked both 0 — skewing up from zero; reconcile should return totalStaked to 0" + fi + if [[ "$expected" == "0" ]] && [[ "$cur" != "0" ]]; then + echo "warning: staking delegation sum is 0 but contract totalStaked is non-zero — possible empty delegations query," \ + "fully unbonded state not yet reconciled, or stale devnet; drift target is still staking_sum=$expected" >&2 + fi + + cast_send_expect_success "$EVM_RPC" "$POOL_OWNER_PK" "$POOL_EVM_ADDR" "syncTotalStaked(uint256)" "$wrong" || exit 1 + + t0="$(date +%s)" + while true; do + cur="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + expected="$(staking_delegations_bond_total_wei "$NODE_RPC" "$POOL_DEL_BECH32")" + if [[ "$cur" =~ ^[0-9]+$ ]] && [[ "$expected" =~ ^[0-9]+$ ]] && uint256_eq "$cur" "$expected"; then + echo " -- drift recovered: totalStaked=$cur matches staking bonded sum=$expected" + log_principal_assets_invariant + echo " drift phase: ok" + return 0 + fi + now="$(date +%s)" + if (( now - t0 > DRIFT_RECOVER_MAX_WAIT_SECS )); then + echo "error: drift phase timed out after ${DRIFT_RECOVER_MAX_WAIT_SECS}s (totalStaked=$cur staking_sum=$expected)" >&2 + exit 1 + fi + sleep 2 + done +} + +# Optional: wait for module rebalance unbond reserve to show non-zero. 
+poll_optional_pending_rebalance_reserve() { + local deadline now p + [[ "${WITHDRAW_SIZING_PENDING_RESERVE_POLL_SECS:-0}" =~ ^[0-9]+$ ]] || return 0 + (( WITHDRAW_SIZING_PENDING_RESERVE_POLL_SECS == 0 )) && { + echo " -- pendingRebalanceUnbondReserve poll skipped (WITHDRAW_SIZING_PENDING_RESERVE_POLL_SECS=0)" + return 0 + } + echo " -- polling up to ${WITHDRAW_SIZING_PENDING_RESERVE_POLL_SECS}s for pendingRebalanceUnbondReserve > 0 (optional)" + deadline=$(( $(date +%s) + WITHDRAW_SIZING_PENDING_RESERVE_POLL_SECS )) + while (( $(date +%s) < deadline )); do + p="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "pendingRebalanceUnbondReserve()(uint256)")" + if [[ "$p" =~ ^[0-9]+$ ]] && (( p > 0 )); then + echo " -- observed pendingRebalanceUnbondReserve=$p > 0" + return 0 + fi + sleep 2 + done + echo "warning: SKIP (optional): pendingRebalanceUnbondReserve still 0 after ${WITHDRAW_SIZING_PENDING_RESERVE_POLL_SECS}s — continuing withdraw_sizing assertions anyway" >&2 +} + +pool_evm_withdraw_request_amount_out() { + local pool="$1" + local rpc="$2" + local rid="$3" + local raw amount_raw amount + + [[ -z "$pool" ]] && { + printf '' + return 0 + } + + raw="$(cast call --rpc-url "$rpc" "$pool" \ + "withdrawRequests(uint256)(address,uint256,uint64,bool,bool)" "$rid" 2>/dev/null || true)" + amount_raw="$(printf '%s\n' "$raw" | awk 'NF{c++} c==2 {print $1; exit}')" + if amount="$(normalize_cast_uint256_output "$amount_raw" 2>/dev/null)"; then + printf '%s' "$amount" + else + printf '' + fi +} + +maybe_auto_deposit_for_withdraw_sizing() { + local user_units="$1" + local total_units="$2" + local total_staked="$3" + local need_seed=false + local i name pk deposited_target=false + + [[ "$WITHDRAW_SIZING_AUTO_DEPOSIT" == "1" ]] || return 0 + + if [[ ! "$user_units" =~ ^[0-9]+$ ]] || (( user_units == 0 )); then + need_seed=true + fi + if [[ ! "$total_units" =~ ^[0-9]+$ ]] || (( total_units == 0 )); then + need_seed=true + fi + if [[ ! 
"$total_staked" =~ ^[0-9]+$ ]] || (( total_staked == 0 )); then + need_seed=true + fi + [[ "$need_seed" == "true" ]] || return 0 + + log_flow_section "withdraw_sizing auto-deposit" \ + "Detected missing preconditions (units/stake). Auto-depositing from dev accounts, similar to user_flow_multikey." \ + "AUTO_DEPOSIT_USERS=$WITHDRAW_SIZING_AUTO_DEPOSIT_USERS AMOUNT_WEI=$WITHDRAW_SIZING_AUTO_DEPOSIT_AMOUNT_WEI" + + if [[ ! "$WITHDRAW_SIZING_AUTO_DEPOSIT_USERS" =~ ^[0-9]+$ ]] || (( WITHDRAW_SIZING_AUTO_DEPOSIT_USERS < 1 )); then + echo "error: WITHDRAW_SIZING_AUTO_DEPOSIT_USERS must be a positive integer" >&2 + exit 1 + fi + + for i in $(seq 0 $((WITHDRAW_SIZING_AUTO_DEPOSIT_USERS - 1))); do + name="dev${i}" + pk="$(dev_account_private_key_from_file "$name" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "warning: skipping auto-deposit for missing account $name" >&2 + continue + fi + [[ "$name" == "$WITHDRAW_SIZING_ACCOUNT" ]] && deposited_target=true + echo " -- auto-deposit from $name amount=$WITHDRAW_SIZING_AUTO_DEPOSIT_AMOUNT_WEI" + approve_and_deposit "$pk" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$WITHDRAW_SIZING_AUTO_DEPOSIT_AMOUNT_WEI" "$EVM_RPC" || { + echo "error: auto-deposit failed for $name" >&2 + exit 1 + } + if [[ "$WITHDRAW_SIZING_AUTO_DEPOSIT_INTERVAL_SECS" =~ ^[0-9]+$ ]] && (( WITHDRAW_SIZING_AUTO_DEPOSIT_INTERVAL_SECS > 0 )); then + sleep "$WITHDRAW_SIZING_AUTO_DEPOSIT_INTERVAL_SECS" + fi + done + + # Ensure the withdraw account itself has units even when it wasn't in the first N dev accounts. 
+ if [[ "$deposited_target" != "true" ]]; then + pk="$(dev_account_private_key_from_file "$WITHDRAW_SIZING_ACCOUNT" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "error: missing $WITHDRAW_SIZING_ACCOUNT in $DEV_ACCOUNTS_FILE for auto-deposit" >&2 + exit 1 + fi + echo " -- auto-deposit target account $WITHDRAW_SIZING_ACCOUNT amount=$WITHDRAW_SIZING_AUTO_DEPOSIT_AMOUNT_WEI" + approve_and_deposit "$pk" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$WITHDRAW_SIZING_AUTO_DEPOSIT_AMOUNT_WEI" "$EVM_RPC" || { + echo "error: auto-deposit failed for target $WITHDRAW_SIZING_ACCOUNT" >&2 + exit 1 + } + fi + + # Wait for pool automation to stake at least some principal if needed. + if [[ "$WITHDRAW_SIZING_TOTAL_STAKED_WAIT_SECS" =~ ^[0-9]+$ ]] && (( WITHDRAW_SIZING_TOTAL_STAKED_WAIT_SECS > 0 )); then + local deadline ts_now + deadline=$(( $(date +%s) + WITHDRAW_SIZING_TOTAL_STAKED_WAIT_SECS )) + while (( $(date +%s) < deadline )); do + ts_now="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + if [[ "$ts_now" =~ ^[0-9]+$ ]] && (( ts_now > 0 )); then + echo " -- totalStaked is now $ts_now after auto-deposit" + return 0 + fi + sleep 2 + done + echo "warning: totalStaked remained 0 after auto-deposit wait (${WITHDRAW_SIZING_TOTAL_STAKED_WAIT_SECS}s)" >&2 + fi +} + +run_phase_withdraw_sizing() { + command -v python3 >/dev/null 2>&1 || { + echo "error: withdraw_sizing phase requires python3" >&2 + exit 1 + } + log_flow_section "Phase withdraw_sizing" \ + "Assert withdraw() amountOut == floor(withdrawUnits * totalStaked / totalUnits) and pendingRebalanceUnbondReserve unchanged (CommunityPool does not reduce it on user withdraw)." 
\ + "WITHDRAW_SIZING_ACCOUNT=$WITHDRAW_SIZING_ACCOUNT WITHDRAW_SIZING_FRACTION_BP=$WITHDRAW_SIZING_FRACTION_BP" + + local pk user_addr user_units total_units total_staked pr_before pr_after next_rid expected_out actual_out withdraw_units + local candidate_bps bp attempt_ok=false + pk="$(dev_account_private_key_from_file "$WITHDRAW_SIZING_ACCOUNT" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "error: missing $WITHDRAW_SIZING_ACCOUNT in $DEV_ACCOUNTS_FILE" >&2 + exit 1 + fi + user_addr="$(cast wallet address --private-key "$pk" 2>/dev/null || true)" + if [[ -z "$user_addr" ]]; then + echo "error: could not derive address for $WITHDRAW_SIZING_ACCOUNT" >&2 + exit 1 + fi + + total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + total_units="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + user_units="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$user_addr")" + maybe_auto_deposit_for_withdraw_sizing "$user_units" "$total_units" "$total_staked" + + # Re-read state after optional auto-deposit. + total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + total_units="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + user_units="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$user_addr")" + + if [[ ! "$total_staked" =~ ^[0-9]+$ ]] || [[ ! "$total_units" =~ ^[0-9]+$ ]] || (( total_staked == 0 )) || (( total_units == 0 )); then + echo "warning: skipping withdraw_sizing — need totalStaked>0 and totalUnits>0 (set WITHDRAW_SIZING_AUTO_DEPOSIT=1 or run user_flow first)" >&2 + return 0 + fi + if [[ ! 
"$user_units" =~ ^[0-9]+$ ]] || (( user_units == 0 )); then + echo "warning: skipping withdraw_sizing — $WITHDRAW_SIZING_ACCOUNT still has no pool units (auto-deposit disabled/failed; run user_flow or set WITHDRAW_SIZING_ACCOUNT)" >&2 + return 0 + fi + + poll_optional_pending_rebalance_reserve + + pr_before="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "pendingRebalanceUnbondReserve()(uint256)")" + [[ "$pr_before" =~ ^[0-9]+$ ]] || pr_before="0" + + candidate_bps="$WITHDRAW_SIZING_CANDIDATE_BP_LIST" + # Ensure the requested fraction is attempted first when it's not already first in the list. + if [[ ",$candidate_bps," != *",$WITHDRAW_SIZING_FRACTION_BP,"* ]]; then + candidate_bps="${WITHDRAW_SIZING_FRACTION_BP},${candidate_bps}" + fi + + for bp in $(printf '%s' "$candidate_bps" | tr ',' ' '); do + [[ "$bp" =~ ^[0-9]+$ ]] || continue + (( bp < 1 || bp > 10000 )) && continue + + # Re-read changing pool values for each attempt. + total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + total_units="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + user_units="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$user_addr")" + if [[ ! "$total_staked" =~ ^[0-9]+$ ]] || [[ ! "$total_units" =~ ^[0-9]+$ ]] || [[ ! 
"$user_units" =~ ^[0-9]+$ ]] || (( total_staked == 0 )) || (( total_units == 0 )) || (( user_units == 0 )); then + continue + fi + + local _py + _py="$(python3 -c " +import sys +uu, tu, ts, bp = map(int, sys.argv[1:5]) +if tu == 0 or ts == 0: + sys.exit(2) +wu = uu * bp // 10000 +if wu == 0: + wu = uu +exp = (wu * ts) // tu +if exp == 0: + wu = uu + exp = (wu * ts) // tu +if exp == 0: + sys.exit(3) +print(wu) +print(exp) +" "$user_units" "$total_units" "$total_staked" "$bp")" || continue + withdraw_units="$(printf '%s\n' "$_py" | sed -n '1p')" + expected_out="$(printf '%s\n' "$_py" | sed -n '2p')" + [[ -n "$withdraw_units" && -n "$expected_out" ]] || continue + + next_rid="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "nextWithdrawRequestId()(uint256)")" + if [[ ! "$next_rid" =~ ^[0-9]+$ ]]; then + echo "error: could not read nextWithdrawRequestId" >&2 + exit 1 + fi + + echo " -- attempt bp=$bp user=$WITHDRAW_SIZING_ACCOUNT unitsOf=$user_units totalUnits=$total_units totalStaked=$total_staked" + echo " withdrawUnits=$withdraw_units expectedAmountOut=$expected_out pendingRebalanceUnbondReserve(before)=$pr_before" + if ( + export CAST_SEND_GAS_LIMIT="$WITHDRAW_SIZING_GAS_LIMIT" + cast_send_expect_success "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" "withdraw(uint256)" "$withdraw_units" + ); then + attempt_ok=true + echo " -- withdraw succeeded with bp=$bp" + break + fi + echo " -- withdraw reverted at bp=$bp; trying smaller candidate (if any)" + sleep 1 + done + + if [[ "$attempt_ok" != "true" ]]; then + echo "error: withdraw_sizing could not find a successful withdraw amount; candidates=$candidate_bps" >&2 + echo "hint: try lower WITHDRAW_SIZING_CANDIDATE_BP_LIST or inspect precompile undelegate constraints" >&2 + exit 1 + fi + + actual_out="$(pool_evm_withdraw_request_amount_out "$POOL_EVM_ADDR" "$EVM_RPC" "$next_rid")" + if [[ -z "$actual_out" ]]; then + echo "error: could not read withdrawRequests($next_rid).amountOut" >&2 + exit 1 + fi + + if ! 
uint256_eq "$expected_out" "$actual_out"; then + echo "error: amountOut mismatch: expected $expected_out (floor(withdrawUnits*totalStaked/totalUnits)) got $actual_out" >&2 + exit 1 + fi + echo " -- withdrawRequests($next_rid).amountOut=$actual_out (matches formula)" + + pr_after="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "pendingRebalanceUnbondReserve()(uint256)")" + [[ "$pr_after" =~ ^[0-9]+$ ]] || pr_after="0" + if ! uint256_eq "$pr_before" "$pr_after"; then + echo "error: pendingRebalanceUnbondReserve changed after user withdraw (before=$pr_before after=$pr_after); expected unchanged" >&2 + exit 1 + fi + echo " -- pendingRebalanceUnbondReserve unchanged: $pr_after" + + echo " withdraw_sizing phase: ok" +} + +block_timestamp_unix() { + cast block latest --rpc-url "$EVM_RPC" --json 2>/dev/null | jq -r '.timestamp // empty' | python3 -c " +import sys +s = sys.stdin.read().strip() +if not s: + print(0) +elif s.startswith('0x') or s.startswith('0X'): + print(int(s, 16)) +else: + print(int(s)) +" +} + +withdraw_request_maturity_unix() { + local rid="$1" + cast call --rpc-url "$EVM_RPC" "$POOL_EVM_ADDR" \ + "withdrawRequests(uint256)(address,uint256,uint64,bool,bool)" "$rid" 2>/dev/null \ + | python3 -c " +import sys +parts = sys.stdin.read().split() +if len(parts) < 3: + print(0) + sys.exit(0) +m = parts[2] +if m.startswith('0x') or m.startswith('0X'): + print(int(m, 16)) +else: + print(int(m.split('[')[0].strip())) +" +} + +wait_until_withdraw_request_mature_or_timeout() { + local rid="$1" + local max_sec="${2:-300}" + local start mt bt + start="$(date +%s)" + mt="$(withdraw_request_maturity_unix "$rid")" + if [[ "$mt" == "0" ]]; then + sleep 3 + mt="$(withdraw_request_maturity_unix "$rid")" + fi + if [[ "$mt" == "0" ]]; then + echo "warning: requestId=$rid has maturity 0; skipping optional liquidity stress" >&2 + return 1 + fi + echo " -- requestId=$rid maturityUnix=$mt; waiting for latest block time to reach maturity (max ${max_sec}s)" + while true; do + 
bt="$(block_timestamp_unix)" + if [[ "$bt" =~ ^[0-9]+$ ]] && (( bt >= mt )); then + echo " -- maturity reached: latest blockTime=$bt >= maturityTime=$mt" + return 0 + fi + if (( $(date +%s) - start > max_sec )); then + echo "warning: timed out waiting for requestId=$rid maturity (blockTime=$bt maturity=$mt)" >&2 + return 1 + fi + sleep 2 + done +} + +submit_withdraw_request_for_account() { + local account_name="$1" + local gas_limit="$2" + local fraction_bp="$3" + local candidate_bps="$4" + local pk user_addr user_units total_units total_staked bp next_rid withdraw_units expected_out + local attempt_ok=false + + pk="$(dev_account_private_key_from_file "$account_name" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "error: missing $account_name in $DEV_ACCOUNTS_FILE" >&2 + return 1 + fi + user_addr="$(cast wallet address --private-key "$pk" 2>/dev/null || true)" + if [[ -z "$user_addr" ]]; then + echo "error: could not derive address for $account_name" >&2 + return 1 + fi + + total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + total_units="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + user_units="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$user_addr")" + maybe_auto_deposit_for_withdraw_sizing "$user_units" "$total_units" "$total_staked" + + total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + total_units="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + user_units="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$user_addr")" + + if [[ ! "$total_staked" =~ ^[0-9]+$ ]] || [[ ! "$total_units" =~ ^[0-9]+$ ]] || (( total_staked == 0 )) || (( total_units == 0 )); then + echo "warning: skipping request setup for $account_name — need totalStaked>0 and totalUnits>0" >&2 + return 2 + fi + if [[ ! 
"$user_units" =~ ^[0-9]+$ ]] || (( user_units == 0 )); then + echo "warning: skipping request setup for $account_name — account has no pool units" >&2 + return 2 + fi + + if [[ ",$candidate_bps," != *",$fraction_bp,"* ]]; then + candidate_bps="${fraction_bp},${candidate_bps}" + fi + + for bp in $(printf '%s' "$candidate_bps" | tr ',' ' '); do + [[ "$bp" =~ ^[0-9]+$ ]] || continue + (( bp < 1 || bp > 10000 )) && continue + + total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + total_units="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + user_units="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$user_addr")" + if [[ ! "$total_staked" =~ ^[0-9]+$ ]] || [[ ! "$total_units" =~ ^[0-9]+$ ]] || [[ ! "$user_units" =~ ^[0-9]+$ ]] || (( total_staked == 0 )) || (( total_units == 0 )) || (( user_units == 0 )); then + continue + fi + + local _py + _py="$(python3 -c " +import sys +uu, tu, ts, bp = map(int, sys.argv[1:5]) +if tu == 0 or ts == 0: + sys.exit(2) +wu = uu * bp // 10000 +if wu == 0: + wu = uu +exp = (wu * ts) // tu +if exp == 0: + wu = uu + exp = (wu * ts) // tu +if exp == 0: + sys.exit(3) +print(wu) +print(exp) +" "$user_units" "$total_units" "$total_staked" "$bp")" || continue + withdraw_units="$(printf '%s\n' "$_py" | sed -n '1p')" + expected_out="$(printf '%s\n' "$_py" | sed -n '2p')" + [[ -n "$withdraw_units" && -n "$expected_out" ]] || continue + + next_rid="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "nextWithdrawRequestId()(uint256)")" + if [[ ! 
"$next_rid" =~ ^[0-9]+$ ]]; then + echo "error: could not read nextWithdrawRequestId" >&2 + return 1 + fi + + echo " -- attempt bp=$bp user=$account_name unitsOf=$user_units totalUnits=$total_units totalStaked=$total_staked" + echo " withdrawUnits=$withdraw_units expectedAmountOut=$expected_out nextWithdrawRequestId=$next_rid" + if ( + export CAST_SEND_GAS_LIMIT="$gas_limit" + cast_send_expect_success "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" "withdraw(uint256)" "$withdraw_units" + ); then + attempt_ok=true + REQUEST_SETUP_PK="$pk" + REQUEST_SETUP_USER_ADDR="$user_addr" + REQUEST_SETUP_REQUEST_ID="$next_rid" + REQUEST_SETUP_WITHDRAW_UNITS="$withdraw_units" + REQUEST_SETUP_EXPECTED_OUT="$expected_out" + break + fi + echo " -- withdraw reverted at bp=$bp; trying smaller candidate (if any)" + sleep 1 + done + + if [[ "$attempt_ok" != "true" ]]; then + echo "error: could not find a successful withdraw amount for $account_name; candidates=$candidate_bps" >&2 + return 1 + fi +} + +run_phase_liquidity() { + command -v python3 >/dev/null 2>&1 || { + echo "error: liquidity phase requires python3" >&2 + exit 1 + } + log_flow_section "Phase liquidity" \ + "Create one withdraw request, then assert claimWithdraw(requestId) reverts before maturity." \ + "Optional CLAIM_STRESS_INSUFFICIENT_LIQUID=1 waits for maturity and retries claimWithdraw best-effort to probe liquidity settling." \ + "LIQUIDITY_ACCOUNT=$LIQUIDITY_ACCOUNT LIQUIDITY_FRACTION_BP=$LIQUIDITY_FRACTION_BP" + + REQUEST_SETUP_PK="" + REQUEST_SETUP_USER_ADDR="" + REQUEST_SETUP_REQUEST_ID="" + REQUEST_SETUP_WITHDRAW_UNITS="" + REQUEST_SETUP_EXPECTED_OUT="" + + local pk user_addr rid withdraw_units expected_out + submit_withdraw_request_for_account "$LIQUIDITY_ACCOUNT" "$LIQUIDITY_GAS_LIMIT" "$LIQUIDITY_FRACTION_BP" "$LIQUIDITY_CANDIDATE_BP_LIST" || { + case "$?" 
in + 2) return 0 ;; + *) exit 1 ;; + esac + } + + pk="$REQUEST_SETUP_PK" + user_addr="$REQUEST_SETUP_USER_ADDR" + rid="$REQUEST_SETUP_REQUEST_ID" + withdraw_units="$REQUEST_SETUP_WITHDRAW_UNITS" + expected_out="$REQUEST_SETUP_EXPECTED_OUT" + + echo " -- submitted withdraw requestId=$rid withdrawUnits=$withdraw_units expectedAmountOut=$expected_out" + echo " -- claimWithdraw($rid) before maturity should revert" + ( + export CAST_SEND_GAS_LIMIT="$LIQUIDITY_GAS_LIMIT" + cast_send_expect_revert "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" "claimWithdraw(uint256)" "$rid" + ) || { + echo "error: expected pre-maturity claimWithdraw($rid) to revert" >&2 + exit 1 + } + echo " -- pre-maturity claimWithdraw($rid) reverted as expected" + + if [[ "$CLAIM_STRESS_INSUFFICIENT_LIQUID" == "1" ]]; then + local attempt=0 + log_flow_section "Optional liquidity stress" \ + "Best-effort only: wait for request maturity, then retry claimWithdraw to observe whether liquid principal is available by then." \ + "This does not fail the deterministic phase if liquidity remains insufficient or the pool is still settling." 
+ if wait_until_withdraw_request_mature_or_timeout "$rid" "$LIQUIDITY_MATURITY_MAX_WAIT_SECS"; then + while (( attempt < CLAIM_STRESS_MAX_ATTEMPTS )); do + if ( + export CAST_SEND_GAS_LIMIT="$LIQUIDITY_GAS_LIMIT" + cast_send_expect_success "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" "claimWithdraw(uint256)" "$rid" + ); then + echo " -- optional stress claim succeeded for requestId=$rid owner=$LIQUIDITY_ACCOUNT addr=$user_addr" + break + fi + attempt=$((attempt + 1)) + echo " -- optional stress retry $attempt/$CLAIM_STRESS_MAX_ATTEMPTS (likely insufficient liquid or pool still settling)" + sleep "$CLAIM_STRESS_POLL_INTERVAL_SECS" + done + if (( attempt >= CLAIM_STRESS_MAX_ATTEMPTS )); then + echo "warning: optional liquidity stress did not claim requestId=$rid after $CLAIM_STRESS_MAX_ATTEMPTS attempts" >&2 + fi + fi + fi + + echo " liquidity phase: ok" +} + +run_phase_dust() { + command -v python3 >/dev/null 2>&1 || { + echo "error: dust phase requires python3" >&2 + exit 1 + } + + log_flow_section "Phase dust" \ + "Assert deposit(1) reverts when price/unit rounds minted units to 0; assert withdraw(1) reverts when amountOut rounds to 0." \ + "Then set owner config boundaries: maxValidators=0 must revert; maxValidators=$DUST_BOUNDARY_MAX_VALIDATORS with minStake=$DUST_HIGH_MIN_STAKE_AMOUNT_WEI must make stake() a no-op; restore config at the end." 
\ + "DUST_ACCOUNT=$DUST_ACCOUNT DUST_SECONDARY_ACCOUNT=$DUST_SECONDARY_ACCOUNT DUST_SEED_DEPOSIT_AMOUNT_WEI=$DUST_SEED_DEPOSIT_AMOUNT_WEI" + + resolve_pool_owner_pk + + local status=0 + local owner_pk primary_pk secondary_pk primary_addr secondary_addr + local old_max_retrieve old_max_validators old_min_stake old_total_staked + local read_max_retrieve read_max_validators read_min_stake restored_total_staked + local stakeable_before_stake total_staked_before_stake stakeable_after_stake total_staked_after_stake + local total_units_for_dust target_total_staked_for_zero_mint + local stakeable_before_restore final_stakeable expected_post_restore_total_staked + + owner_pk="$POOL_OWNER_PK" + primary_pk="$(dev_account_private_key_from_file "$DUST_ACCOUNT" "$DEV_ACCOUNTS_FILE" || true)" + secondary_pk="$(dev_account_private_key_from_file "$DUST_SECONDARY_ACCOUNT" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$primary_pk" ]]; then + echo "error: missing $DUST_ACCOUNT in $DEV_ACCOUNTS_FILE" >&2 + exit 1 + fi + if [[ -z "$secondary_pk" ]]; then + echo "error: missing $DUST_SECONDARY_ACCOUNT in $DEV_ACCOUNTS_FILE" >&2 + exit 1 + fi + primary_addr="$(cast wallet address --private-key "$primary_pk" 2>/dev/null || true)" + secondary_addr="$(cast wallet address --private-key "$secondary_pk" 2>/dev/null || true)" + if [[ -z "$primary_addr" || -z "$secondary_addr" ]]; then + echo "error: could not derive dust account addresses" >&2 + exit 1 + fi + + old_max_retrieve="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maxRetrieve()(uint32)")" + old_max_validators="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maxValidators()(uint32)")" + old_min_stake="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "minStakeAmount()(uint256)")" + old_total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + if [[ ! "$old_max_retrieve" =~ ^[0-9]+$ ]] || [[ ! "$old_max_validators" =~ ^[0-9]+$ ]] || [[ ! "$old_min_stake" =~ ^[0-9]+$ ]] || [[ ! 
"$old_total_staked" =~ ^[0-9]+$ ]]; then + echo "error: could not read original CommunityPool config/state before dust phase" >&2 + exit 1 + fi + log_contract_debug_state "dust:start" + + echo " -- setConfig(maxValidators=0) should revert" + if ! cast_send_expect_revert "$EVM_RPC" "$owner_pk" "$POOL_EVM_ADDR" \ + "setConfig(uint32,uint32,uint256)" "$old_max_retrieve" 0 "$old_min_stake"; then + echo "error: expected setConfig(..., maxValidators=0, ...) to revert" >&2 + status=1 + fi + + if (( status == 0 )); then + echo " -- setConfig boundary: maxValidators=$DUST_BOUNDARY_MAX_VALIDATORS minStakeAmount=$DUST_HIGH_MIN_STAKE_AMOUNT_WEI" + if ! cast_send_expect_success "$EVM_RPC" "$owner_pk" "$POOL_EVM_ADDR" \ + "setConfig(uint32,uint32,uint256)" "$old_max_retrieve" "$DUST_BOUNDARY_MAX_VALIDATORS" "$DUST_HIGH_MIN_STAKE_AMOUNT_WEI"; then + echo "error: setConfig boundary update failed" >&2 + status=1 + fi + fi + + if (( status == 0 )); then + read_max_retrieve="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maxRetrieve()(uint32)")" + read_max_validators="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maxValidators()(uint32)")" + read_min_stake="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "minStakeAmount()(uint256)")" + if ! uint256_eq "$read_max_retrieve" "$old_max_retrieve" || ! uint256_eq "$read_max_validators" "$DUST_BOUNDARY_MAX_VALIDATORS" || ! uint256_eq "$read_min_stake" "$DUST_HIGH_MIN_STAKE_AMOUNT_WEI"; then + echo "error: boundary config readback mismatch maxRetrieve=$read_max_retrieve maxValidators=$read_max_validators minStakeAmount=$read_min_stake" >&2 + status=1 + else + echo " -- boundary config applied: maxRetrieve=$read_max_retrieve maxValidators=$read_max_validators minStakeAmount=$read_min_stake" + fi + log_contract_debug_state "dust:after-boundary-config" + fi + + if (( status == 0 )); then + echo " -- seed deposit from $DUST_ACCOUNT amount=$DUST_SEED_DEPOSIT_AMOUNT_WEI under high minStake" + if ! 
approve_and_deposit "$primary_pk" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$DUST_SEED_DEPOSIT_AMOUNT_WEI" "$EVM_RPC"; then + echo "error: dust seed approve+deposit failed for $DUST_ACCOUNT" >&2 + status=1 + fi + log_contract_debug_state "dust:after-seed-deposit" + fi + + if (( status == 0 )); then + stakeable_before_stake="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + total_staked_before_stake="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + echo " -- stake() under elevated minStake should no-op" + if ! cast_send_expect_success "$EVM_RPC" "$owner_pk" "$POOL_EVM_ADDR" "stake()"; then + echo "error: stake() tx failed under elevated minStake" >&2 + status=1 + fi + fi + + if (( status == 0 )); then + stakeable_after_stake="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + total_staked_after_stake="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + if ! uint256_eq "$stakeable_before_stake" "$stakeable_after_stake" || ! uint256_eq "$total_staked_before_stake" "$total_staked_after_stake"; then + echo "error: expected stake() no-op under elevated minStake; before stakeable=$stakeable_before_stake totalStaked=$total_staked_before_stake after stakeable=$stakeable_after_stake totalStaked=$total_staked_after_stake" >&2 + status=1 + else + echo " -- stake() no-op confirmed: stakeablePrincipalLedger=$stakeable_after_stake totalStaked=$total_staked_after_stake" + fi + fi + + if (( status == 0 )); then + total_units_for_dust="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + if [[ ! "$total_units_for_dust" =~ ^[0-9]+$ ]] || ! 
uint256_gt "$total_units_for_dust" 1; then + echo "error: dust phase needs totalUnits > 1 after seed deposit (got $total_units_for_dust)" >&2 + log_contract_debug_state "dust:bad-total-units" + status=1 + fi + fi + + if (( status == 0 )); then + target_total_staked_for_zero_mint="$(python3 -c "print(int('$total_units_for_dust') + 1)")" + echo " -- syncTotalStaked($target_total_staked_for_zero_mint) so deposit(1) rounds minted units to 0" + if ! cast_send_expect_success "$EVM_RPC" "$owner_pk" "$POOL_EVM_ADDR" "syncTotalStaked(uint256)" "$target_total_staked_for_zero_mint"; then + echo "error: failed to inflate totalStaked for dust deposit test" >&2 + status=1 + fi + fi + + if (( status == 0 )); then + echo " -- approve + deposit(1) from $DUST_SECONDARY_ACCOUNT should revert with zero minted units" + if ! cast_send_expect_success "$EVM_RPC" "$secondary_pk" "$BOND_PRECOMPILE" \ + "approve(address,uint256)" "$POOL_EVM_ADDR" 1; then + echo "error: approve(1) failed for $DUST_SECONDARY_ACCOUNT" >&2 + status=1 + elif ! cast_send_expect_revert "$EVM_RPC" "$secondary_pk" "$POOL_EVM_ADDR" "deposit(uint256)" 1; then + echo "error: expected deposit(1) dust path to revert" >&2 + log_contract_debug_state "dust:deposit1-unexpected-success" + status=1 + else + echo " -- deposit(1) reverted as expected under zero-minted-units conditions" + fi + fi + + if (( status == 0 )); then + echo " -- syncTotalStaked(1) so withdraw(1) rounds amountOut to 0" + if ! cast_send_expect_success "$EVM_RPC" "$owner_pk" "$POOL_EVM_ADDR" "syncTotalStaked(uint256)" 1; then + echo "error: failed to set totalStaked=1 for dust withdraw test" >&2 + status=1 + elif ! 
cast_send_expect_revert "$EVM_RPC" "$primary_pk" "$POOL_EVM_ADDR" "withdraw(uint256)" 1; then + echo "error: expected withdraw(1) dust path to revert" >&2 + log_contract_debug_state "dust:withdraw1-unexpected-success" + status=1 + else + echo " -- withdraw(1) reverted as expected when amountOut rounds to 0" + fi + fi + + log_contract_debug_state "dust:before-restore" + stakeable_before_restore="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + [[ "$stakeable_before_restore" =~ ^[0-9]+$ ]] || stakeable_before_restore="0" + + echo " -- restoring original totalStaked while elevated minStake still prevents auto-stake" + if ! cast_send_expect_success "$EVM_RPC" "$owner_pk" "$POOL_EVM_ADDR" "syncTotalStaked(uint256)" "$old_total_staked"; then + echo "error: failed to restore original totalStaked after dust phase" >&2 + status=1 + fi + restored_total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + if ! uint256_eq "$restored_total_staked" "$old_total_staked"; then + echo "error: totalStaked restore failed before config restore got $restored_total_staked expected $old_total_staked" >&2 + log_contract_debug_state "dust:restore-totalstaked-mismatch" + status=1 + else + echo " -- original totalStaked restored under elevated minStake: $restored_total_staked" + fi + + echo " -- restoring original config" + if ! 
cast_send_expect_success "$EVM_RPC" "$owner_pk" "$POOL_EVM_ADDR" \ + "setConfig(uint32,uint32,uint256)" "$old_max_retrieve" "$old_max_validators" "$old_min_stake"; then + echo "error: failed to restore original config after dust phase" >&2 + status=1 + fi + + read_max_retrieve="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maxRetrieve()(uint32)")" + read_max_validators="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "maxValidators()(uint32)")" + read_min_stake="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "minStakeAmount()(uint256)")" + restored_total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + final_stakeable="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + if ! uint256_eq "$read_max_retrieve" "$old_max_retrieve" || ! uint256_eq "$read_max_validators" "$old_max_validators" || ! uint256_eq "$read_min_stake" "$old_min_stake"; then + echo "error: restored config mismatch maxRetrieve=$read_max_retrieve expected=$old_max_retrieve maxValidators=$read_max_validators expected=$old_max_validators minStakeAmount=$read_min_stake expected=$old_min_stake" >&2 + log_contract_debug_state "dust:restore-config-mismatch" + status=1 + else + echo " -- original config restored" + fi + + expected_post_restore_total_staked="$(uint256_add "$old_total_staked" "$stakeable_before_restore")" + if uint256_eq "$restored_total_staked" "$old_total_staked"; then + echo " -- post-restore totalStaked unchanged at original value: $restored_total_staked" + elif uint256_eq "$restored_total_staked" "$expected_post_restore_total_staked" && uint256_eq "$final_stakeable" 0; then + echo " -- post-restore totalStaked advanced by prior liquid stakeable=$stakeable_before_restore after minStake restoration" + echo " this is expected live-chain automation, not a contract/module bug" + else + echo "error: unexpected post-restore state totalStaked=$restored_total_staked old_total_staked=$old_total_staked 
stakeable_before_restore=$stakeable_before_restore final_stakeable=$final_stakeable" >&2 + log_contract_debug_state "dust:unexpected-post-restore" + status=1 + fi + log_contract_debug_state "dust:after-restore" + + if (( status != 0 )); then + exit 1 + fi + + echo " dust phase: ok" +} + +run_phase_rewards() { + command -v python3 >/dev/null 2>&1 || { + echo "error: rewards phase requires python3" >&2 + exit 1 + } + + log_flow_section "Phase rewards" \ + "Run owner harvest() ${REWARDS_HARVEST_COUNT}x, then claimRewards() for one dev account and assert sane deltas plus liquid reserve invariants." \ + "REWARDS_ACCOUNT=$REWARDS_ACCOUNT SKIP_EMPTY_POOL_HARVEST=$SKIP_EMPTY_POOL_HARVEST REWARDS_HARVEST_INTERVAL_SECS=$REWARDS_HARVEST_INTERVAL_SECS" + + resolve_pool_owner_pk + + local rewards_pk rewards_addr user_units total_units total_staked + local before_staked before_liquid before_reward before_acc after_staked after_liquid after_reward after_acc + local reward_user_balance_before reward_user_balance_after reserve_before_claim reserve_after_claim + local units_before_claim units_after_claim acc_before_claim acc_after_claim debt_before_claim debt_after_claim + local liquid_before_claim liquid_after_claim pending_before_claim pending_after_claim expected_debt_after_claim + local simulated_claim_before_tx raw_simulated_claim_before_tx expected_reserve_after_no_concurrency + local reward_reserve_credit_during_claim_block claim_receipt_json claim_tx_fee_wei claim_balance_delta + local claim_gross_from_wallet_delta errf raw i + + rewards_pk="$(dev_account_private_key_from_file "$REWARDS_ACCOUNT" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$rewards_pk" ]]; then + echo "error: missing $REWARDS_ACCOUNT in $DEV_ACCOUNTS_FILE" >&2 + exit 1 + fi + rewards_addr="$(cast wallet address --private-key "$rewards_pk" 2>/dev/null || true)" + if [[ -z "$rewards_addr" ]]; then + echo "error: could not derive address for $REWARDS_ACCOUNT" >&2 + exit 1 + fi + + 
total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + total_units="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + user_units="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$rewards_addr")" + + if [[ "${WITHDRAW_SIZING_AUTO_DEPOSIT:-1}" == "1" ]]; then + local saved_withdraw_account="$WITHDRAW_SIZING_ACCOUNT" + WITHDRAW_SIZING_ACCOUNT="$REWARDS_ACCOUNT" + maybe_auto_deposit_for_withdraw_sizing "$user_units" "$total_units" "$total_staked" + WITHDRAW_SIZING_ACCOUNT="$saved_withdraw_account" + fi + + total_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + total_units="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + user_units="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$rewards_addr")" + log_contract_debug_state "rewards:start" + + if [[ ! "$total_units" =~ ^[0-9]+$ ]] || [[ "$total_units" == "0" ]]; then + if [[ "$SKIP_EMPTY_POOL_HARVEST" == "1" ]]; then + echo "warning: SKIP rewards phase: totalUnits is 0 (empty pool). Deep empty-pool harvest cases belong in Forge; see contracts/solidity/pool/README.md." >&2 + assert_liquid_reserve_invariants "rewards:empty-skip" || exit 1 + return 0 + fi + echo "error: rewards phase requires totalUnits>0 unless SKIP_EMPTY_POOL_HARVEST=1" >&2 + exit 1 + fi + if [[ ! 
"$user_units" =~ ^[0-9]+$ ]] || [[ "$user_units" == "0" ]]; then + echo "warning: SKIP rewards phase: $REWARDS_ACCOUNT has no pool units even after optional auto-deposit" >&2 + assert_liquid_reserve_invariants "rewards:no-user-units" || exit 1 + return 0 + fi + + for i in $(seq 1 "$REWARDS_HARVEST_COUNT"); do + before_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + before_liquid="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "liquidBalance()(uint256)")" + before_reward="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "rewardReserve()(uint256)")" + before_acc="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "accRewardPerUnit()(uint256)")" + echo " -- harvest attempt $i/$REWARDS_HARVEST_COUNT" + if ! cast_send_expect_success "$EVM_RPC" "$POOL_OWNER_PK" "$POOL_EVM_ADDR" "harvest()"; then + echo "warning: harvest failed at attempt $i — likely no validator rewards available yet" >&2 + echo "note: this only means no fresh rewards were claimable from the distribution precompile at this instant" >&2 + echo "note: existing rewardReserve / user pending rewards may still be non-zero from earlier harvests or EndBlock automation" >&2 + if [[ "${REWARDS_REQUIRE_HARVEST_SUCCESS:-0}" == "1" ]]; then + echo "error: REWARDS_REQUIRE_HARVEST_SUCCESS=1 but harvest failed" >&2 + exit 1 + else + echo " -- skipping remaining harvest attempts and proceeding with claimRewards test" >&2 + break + fi + fi + after_staked="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + after_liquid="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "liquidBalance()(uint256)")" + after_reward="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "rewardReserve()(uint256)")" + after_acc="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "accRewardPerUnit()(uint256)")" + if ! 
uint256_eq "$before_staked" "$after_staked"; then + echo "error: harvest changed totalStaked on attempt $i (before=$before_staked after=$after_staked)" >&2 + exit 1 + fi + if uint256_gt "$before_liquid" "$after_liquid"; then + echo "error: harvest decreased liquidBalance on attempt $i (before=$before_liquid after=$after_liquid)" >&2 + exit 1 + fi + if uint256_gt "$before_reward" "$after_reward"; then + echo "error: harvest decreased rewardReserve on attempt $i (before=$before_reward after=$after_reward)" >&2 + exit 1 + fi + if uint256_gt "$before_acc" "$after_acc"; then + echo "error: harvest decreased accRewardPerUnit on attempt $i (before=$before_acc after=$after_acc)" >&2 + exit 1 + fi + echo " liquid delta=$(uint256_sub_nonnegative "$after_liquid" "$before_liquid") rewardReserve delta=$(uint256_sub_nonnegative "$after_reward" "$before_reward") accRewardPerUnit delta=$(uint256_sub_nonnegative "$after_acc" "$before_acc")" + assert_liquid_reserve_invariants "rewards:post-harvest-$i" || exit 1 + if [[ "$REWARDS_HARVEST_INTERVAL_SECS" =~ ^[0-9]+$ ]] && (( REWARDS_HARVEST_INTERVAL_SECS > 0 )) && (( i < REWARDS_HARVEST_COUNT )); then + sleep "$REWARDS_HARVEST_INTERVAL_SECS" + fi + done + + reserve_before_claim="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "rewardReserve()(uint256)")" + units_before_claim="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$rewards_addr")" + acc_before_claim="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "accRewardPerUnit()(uint256)")" + debt_before_claim="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "rewardDebt(address)(uint256)" "$rewards_addr")" + liquid_before_claim="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "liquidBalance()(uint256)")" + reward_user_balance_before="$(pool_evm_call_uint256_args "$BOND_PRECOMPILE" "$EVM_RPC" "balanceOf(address)(uint256)" "$rewards_addr")" + pending_before_claim="$(uint256_pending_rewards_from_index "$units_before_claim" 
"$acc_before_claim" "$debt_before_claim")" + expected_debt_after_claim="$(python3 -c "print((int('$units_before_claim') * int('$acc_before_claim')) // 10**18)" 2>/dev/null || echo "")" + raw_simulated_claim_before_tx="$(cast call --rpc-url "$EVM_RPC" --from "$rewards_addr" "$POOL_EVM_ADDR" "claimRewards()(uint256)" 2>/dev/null || true)" + if simulated_claim_before_tx="$(normalize_cast_uint256_output "$raw_simulated_claim_before_tx")"; then + : + else + echo "error: could not simulate claimRewards() with eth_call before sending tx" >&2 + exit 1 + fi + expected_reserve_after_no_concurrency="$(uint256_sub_strict "$reserve_before_claim" "$simulated_claim_before_tx" || true)" + if [[ -z "$expected_reserve_after_no_concurrency" ]]; then + echo "error: simulated claimRewards() amount $simulated_claim_before_tx exceeds rewardReserve before claim $reserve_before_claim" >&2 + exit 1 + fi + + echo " -- pre-claim reward accounting snapshot" + echo " units=$units_before_claim accRewardPerUnit=$acc_before_claim rewardDebt=$debt_before_claim pendingFromIndex=$pending_before_claim" + echo " rewardReserve=$reserve_before_claim liquidBalance=$liquid_before_claim user bond balance=$reward_user_balance_before" + echo " claimRewards() eth_call simulation=$simulated_claim_before_tx" + echo " expected rewardReserve after claim if no later credit lands in the same block=$expected_reserve_after_no_concurrency" + echo " interpretation: eth_call is the authoritative pre-tx expectation; pendingFromIndex is a readable cross-check from reward accounting state" + echo " -- claimRewards() for $REWARDS_ACCOUNT" + wait_evm_nonce_settled_for_pk "$rewards_pk" "$EVM_RPC" 45 + errf="$(mktemp -t cast_claim_rewards.XXXXXX)" + raw="$(cast send --json --rpc-url "$EVM_RPC" --private-key "$rewards_pk" "$POOL_EVM_ADDR" "claimRewards()" 2>"$errf")" || { + cat "$errf" >&2 + rm -f "$errf" + echo "error: claimRewards failed for $REWARDS_ACCOUNT" >&2 + exit 1 + } + rm -f "$errf" + if ! 
claim_receipt_json="$(cast_stdout_to_receipt_json "$raw")"; then + echo "error: claimRewards did not return parseable JSON receipt for $REWARDS_ACCOUNT" >&2 + echo "$raw" >&2 + exit 1 + fi + if ! echo "$claim_receipt_json" | jq -e '.status' >/dev/null 2>&1; then + echo "error: claimRewards receipt JSON missing .status for $REWARDS_ACCOUNT" >&2 + echo "$claim_receipt_json" >&2 + exit 1 + fi + if ! cast_receipt_success "$claim_receipt_json"; then + echo "error: claimRewards reverted for $REWARDS_ACCOUNT (status=$(echo "$claim_receipt_json" | jq -r '.status'))" >&2 + exit 1 + fi + claim_tx_fee_wei="$(receipt_json_effective_fee_wei "$claim_receipt_json" || true)" + if [[ -z "$claim_tx_fee_wei" ]]; then + echo "error: could not derive claimRewards tx fee from receipt JSON" >&2 + echo "$claim_receipt_json" >&2 + exit 1 + fi + reserve_after_claim="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "rewardReserve()(uint256)")" + units_after_claim="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$rewards_addr")" + acc_after_claim="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "accRewardPerUnit()(uint256)")" + debt_after_claim="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "rewardDebt(address)(uint256)" "$rewards_addr")" + liquid_after_claim="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "liquidBalance()(uint256)")" + reward_user_balance_after="$(pool_evm_call_uint256_args "$BOND_PRECOMPILE" "$EVM_RPC" "balanceOf(address)(uint256)" "$rewards_addr")" + pending_after_claim="$(uint256_pending_rewards_from_index "$units_after_claim" "$acc_after_claim" "$debt_after_claim")" + reward_reserve_credit_during_claim_block="$(python3 -c "print(int('$reserve_after_claim') - int('$expected_reserve_after_no_concurrency'))" 2>/dev/null || echo "")" + claim_balance_delta="$(uint256_sub_nonnegative "$reward_user_balance_after" "$reward_user_balance_before")" + claim_gross_from_wallet_delta="$(uint256_add "$claim_balance_delta" 
"$claim_tx_fee_wei")" + + if ! uint256_eq "$simulated_claim_before_tx" "$pending_before_claim"; then + echo "warning: pre-claim pendingFromIndex ($pending_before_claim) differed from claimRewards() eth_call simulation ($simulated_claim_before_tx)" >&2 + echo "note: this can happen on a live devnet if state changes between separate reads; the simulation is treated as authoritative" >&2 + fi + if ! uint256_eq "$claim_gross_from_wallet_delta" "$simulated_claim_before_tx"; then + echo "error: claimRewards gross wallet accounting mismatch for $REWARDS_ACCOUNT (expected eth_call=$simulated_claim_before_tx net_wallet_delta=$claim_balance_delta tx_fee=$claim_tx_fee_wei net_plus_fee=$claim_gross_from_wallet_delta)" >&2 + exit 1 + fi + if ! uint256_eq "$debt_after_claim" "$expected_debt_after_claim"; then + echo "error: claimRewards rewardDebt mismatch for $REWARDS_ACCOUNT (expected=$expected_debt_after_claim after=$debt_after_claim)" >&2 + exit 1 + fi + if ! uint256_eq "$units_before_claim" "$units_after_claim"; then + echo "error: claimRewards changed pool units for $REWARDS_ACCOUNT (before=$units_before_claim after=$units_after_claim)" >&2 + exit 1 + fi + if [[ -z "$reward_reserve_credit_during_claim_block" ]] || [[ "$reward_reserve_credit_during_claim_block" =~ ^- ]]; then + echo "error: claimRewards reserve accounting mismatch for $REWARDS_ACCOUNT (before=$reserve_before_claim pending=$pending_before_claim after=$reserve_after_claim impliedConcurrentCredit=$reward_reserve_credit_during_claim_block)" >&2 + exit 1 + fi + + echo " -- post-claim reward accounting snapshot" + echo " units=$units_after_claim accRewardPerUnit=$acc_after_claim rewardDebt=$debt_after_claim pendingFromIndex=$pending_after_claim" + echo " rewardReserve=$reserve_after_claim liquidBalance=$liquid_after_claim user bond balance=$reward_user_balance_after" + echo " user bond balance delta (net of gas)=$claim_balance_delta" + echo " claimRewards tx fee (same bond/native denom)=$claim_tx_fee_wei" + echo 
" user wallet gross reward delta (net + fee)=$claim_gross_from_wallet_delta" + echo " expected claim from pre-claim index state=$pending_before_claim" + echo " expected claim from pre-tx eth_call simulation=$simulated_claim_before_tx" + echo " reserve movement explained as: rewardReserve_after = rewardReserve_before - claimed + concurrentRewardCredit" + echo " concurrentRewardCreditDuringClaimBlock=$reward_reserve_credit_during_claim_block" + if uint256_gt "$acc_after_claim" "$acc_before_claim"; then + echo " note: accRewardPerUnit increased during/after the claim block; EndBlock auto-harvest likely credited fresh rewards after the user claim" + echo " note: pendingFromIndex(after)=$pending_after_claim means the user already has new unclaimed rewards from that later credit" + else + echo " note: accRewardPerUnit unchanged across the claim block; no same-block post-claim harvest was observed" + fi + assert_liquid_reserve_invariants "rewards:post-claim" || exit 1 + log_contract_debug_state "rewards:after-claim" + + echo " rewards phase: ok" +} + +require_bins() { + command -v jq >/dev/null 2>&1 || { + echo "missing dependency: jq" >&2 + exit 1 + } + command -v curl >/dev/null 2>&1 || { + echo "missing dependency: curl" >&2 + exit 1 + } + command -v cast >/dev/null 2>&1 || { + echo "missing dependency: cast" >&2 + exit 1 + } + command -v evmd >/dev/null 2>&1 || { + echo "missing dependency: evmd" >&2 + exit 1 + } +} + +main() { + require_bins + # Optional: bash community_pool_edge_cases.sh auth|drift|withdraw_sizing|liquidity|dust|rewards|all|auth,drift + if [[ -n "${1:-}" ]]; then + case "$1" in + auth|drift|withdraw_sizing|liquidity|dust|rewards) + COMMUNITY_POOL_EDGE_PHASES="$1" + shift + ;; + all) + COMMUNITY_POOL_EDGE_PHASES="auth,drift,withdraw_sizing,liquidity,dust,rewards" + shift + ;; + *) + if [[ "$1" == *","* ]]; then + COMMUNITY_POOL_EDGE_PHASES="$1" + shift + else + echo "error: unknown phase '$1' (expected 
auth|drift|withdraw_sizing|liquidity|dust|rewards|all|comma-separated list)" >&2 + exit 1 + fi + ;; + esac + fi + COMMUNITY_POOL_EDGE_PHASES="${COMMUNITY_POOL_EDGE_PHASES:-auth}" + + if [[ ! -f "$DEV_ACCOUNTS_FILE" ]]; then + echo "error: missing $DEV_ACCOUNTS_FILE" >&2 + echo "hint: start a devnet with rebalance_scenario_runner or multi_node_startup so dev accounts exist" >&2 + exit 1 + fi + + echo "==> community_pool_edge_cases" + echo " COMMUNITY_POOL_EDGE_PHASES=${COMMUNITY_POOL_EDGE_PHASES}" + echo " NODE_RPC=$NODE_RPC EVM_RPC=$EVM_RPC (ensure_evm_rpc_ready may override EVM_RPC)" + + resolve_pool_evm_addr + ensure_evm_rpc_ready || exit 1 + echo " EVM_RPC=$EVM_RPC" + + local ran_any=false + if phase_enabled auth; then + run_phase_auth + ran_any=true + fi + if phase_enabled drift; then + run_phase_drift + ran_any=true + fi + if phase_enabled withdraw_sizing; then + run_phase_withdraw_sizing + ran_any=true + fi + if phase_enabled liquidity; then + run_phase_liquidity + ran_any=true + fi + if phase_enabled dust; then + run_phase_dust + ran_any=true + fi + if phase_enabled rewards; then + run_phase_rewards + ran_any=true + fi + if [[ "$ran_any" == "false" ]]; then + echo " (no phases enabled matching COMMUNITY_POOL_EDGE_PHASES; nothing to do)" + fi + + log_flow_section "Done" "community_pool_edge_cases finished successfully." +} + +main "$@" diff --git a/tests/e2e/poolrebalancer/lib/pool_e2e_common.sh b/tests/e2e/poolrebalancer/lib/pool_e2e_common.sh new file mode 100755 index 00000000..618e91ee --- /dev/null +++ b/tests/e2e/poolrebalancer/lib/pool_e2e_common.sh @@ -0,0 +1,410 @@ +#!/usr/bin/env bash +# Shared helpers for tests/e2e/poolrebalancer/*.sh (e.g. user_flow_multikey.sh). +# +# Provides: EVM RPC discovery, bech32→hex for cast, uint256 cast helpers, approve+deposit, +# cast send w/ receipt check, unbonding parse, user balance snapshots. +# Requires: cast, jq, evmd (where noted); python3 optional for hex/JSON edge cases. 
#
set -euo pipefail

# --- CometBFT / JSON-RPC mapping (NODE_RPC tcp …:26657 → val_i JSON-RPC 8545+i*10) ---

# Print the CometBFT /status URL for NODE_RPC, stripping any tcp/http/https scheme.
tendermint_status_url() {
  local hp="${NODE_RPC#tcp://}"
  hp="${hp#http://}"
  hp="${hp#https://}"
  printf 'http://%s/status' "$hp"
}

# Map a CometBFT RPC endpoint to its EVM JSON-RPC endpoint.
# Devnet layout convention: validator i uses 26657 + i*100 (CometBFT) and
# 8545 + i*10 (JSON-RPC). Prints the URL and returns 0 for a recognized
# port; returns 1 otherwise so callers can fall back to candidates.
derive_evm_rpc_from_node_rpc() {
  local node="$1"
  local hostport host port idx jsonrpc_port
  hostport="${node#tcp://}"
  hostport="${hostport#http://}"
  hostport="${hostport#https://}"
  host="${hostport%%:*}"
  port="${hostport##*:}"
  if [[ "$port" =~ ^[0-9]+$ ]] && (( port >= 26657 )) && (( (port - 26657) % 100 == 0 )); then
    idx=$(( (port - 26657) / 100 ))
    jsonrpc_port=$((8545 + (idx * 10)))
    printf 'http://%s:%s' "$host" "$jsonrpc_port"
    return 0
  fi
  return 1
}

# Poll common JSON-RPC ports until cast chain-id succeeds; exports EVM_RPC.
# Candidate order: derived-from-NODE_RPC, current EVM_RPC, then well-known
# local ports. Overall 120s deadline; returns 1 on expiry.
ensure_evm_rpc_ready() {
  local candidates=() derived="" c start now
  if derived="$(derive_evm_rpc_from_node_rpc "${NODE_RPC:-}" 2>/dev/null || true)"; then
    candidates+=("$derived")
  fi
  candidates+=("${EVM_RPC:-}")
  candidates+=("http://127.0.0.1:8545" "http://127.0.0.1:8555" "http://127.0.0.1:8565" "http://127.0.0.1:8575")

  echo "==> Waiting for EVM RPC readiness"
  start="$(date +%s)"
  while true; do
    for c in "${candidates[@]}"; do
      # Empty entries come from an unset EVM_RPC; skip them.
      [[ -z "$c" ]] && continue
      if cast chain-id --rpc-url "$c" >/dev/null 2>&1; then
        if [[ "${EVM_RPC:-}" != "$c" ]]; then
          echo "==> Using EVM RPC endpoint: $c"
        fi
        EVM_RPC="$c"
        return 0
      fi
    done
    now="$(date +%s)"
    if (( now - start > 120 )); then
      echo "error: no reachable EVM JSON-RPC endpoint found after 120s" >&2
      echo "tried: ${candidates[*]}" >&2
      return 1
    fi
    sleep 2
  done
}

# --- Dev accounts (multi_node_startup / scenario runner dev_accounts.txt) ---

# Look up "private_key:" under the "<account_name>:" block of a dev-accounts
# file. Prints the key (empty if not found); returns 1 if the file is missing.
# assumes account names need no regex escaping (simple identifiers) — TODO confirm.
dev_account_private_key_from_file() {
  local account_name="$1"
  local f="$2"
  [[ -f "$f" ]] || return 1
  awk -v name="$account_name" '
    $0 ~ ("^" name ":") {in_block=1; next}
    in_block && $1=="private_key:" {print $2; exit}
    in_block && /^[^[:space:]]/ {in_block=0}
  ' "$f"
}

# Sum delegations for a delegator in current staking bond denom (wei-like integer).
# Prefers python3 for exact big-int arithmetic; prints "0" on any query failure.
staking_delegations_bond_total_wei() {
  local node_rpc="$1"
  local delegator="$2"
  local bond_denom dels_json
  bond_denom="$(evmd query staking params --node "$node_rpc" -o json 2>/dev/null | jq -r '.params.bond_denom // .bond_denom // empty' 2>/dev/null || true)"
  dels_json="$(evmd query staking delegations "$delegator" --node "$node_rpc" -o json 2>/dev/null || true)"
  if [[ -z "$dels_json" ]]; then
    echo "0"
    return 0
  fi
  if command -v python3 >/dev/null 2>&1; then
    python3 -c '
import json, sys
raw = sys.stdin.read().strip()
if not raw:
    print(0)
    sys.exit(0)
data = json.loads(raw)
denom = sys.argv[1]
total = 0
for r in data.get("delegation_responses", []):
    bal = (r or {}).get("balance") or {}
    if denom and bal.get("denom") != denom:
        continue
    amt = bal.get("amount", "0")
    try:
        total += int(str(amt))
    except Exception:
        pass
print(total)
' "$bond_denom" <<<"$dels_json" 2>/dev/null || echo "0"
  else
    # Fallback for environments without python3. `awk` avoids scientific notation.
    # NOTE(review): awk sums in floating point, so wei-scale totals may lose
    # precision here — acceptable only as a best-effort fallback; confirm.
    if [[ -n "$bond_denom" ]]; then
      echo "$dels_json" | jq -r --arg denom "$bond_denom" '.delegation_responses[]? | select(.balance.denom == $denom) | .balance.amount' 2>/dev/null | awk '{s+=$1} END {printf "%.0f\n", s+0}'
    else
      echo "$dels_json" | jq -r '.delegation_responses[]? | .balance.amount' 2>/dev/null | awk '{s+=$1} END {printf "%.0f\n", s+0}'
    fi
  fi
}

# --- Bech32 account → 0x for cast (uses CHAIN_HOME when set) ---

# Run `evmd debug addr`, preferring --home "$CHAIN_HOME" when it exists, and
# falling back to the bare invocation. Never fails (|| true): callers parse
# whatever output was produced.
evmd_debug_addr() {
  local addr="$1"
  if [[ -n "${CHAIN_HOME:-}" && -d "$CHAIN_HOME" ]]; then
    evmd debug addr "$addr" --home "$CHAIN_HOME" 2>/dev/null || evmd debug addr "$addr" 2>/dev/null || true
  else
    evmd debug addr "$addr" 2>/dev/null || true
  fi
}

# Extract the 0x hex address from `evmd debug addr` output for a bech32
# account. Tries a bare 0x-prefixed token first, then the "Address (hex)"
# field; prefixes "0x" if the result came back unprefixed. Prints "" on miss.
resolve_evm_hex_from_bech32() {
  local bech="$1"
  local dbg hex
  dbg="$(evmd_debug_addr "$bech")"
  hex="$(printf '%s\n' "$dbg" | awk '{for(i=1;i<=NF;i++){if($i ~ /^0x[0-9a-fA-F]{40}$/){print $i; exit}}}')"
  if [[ -z "$hex" ]]; then
    hex="$(printf '%s\n' "$dbg" | awk -F': ' '/Address \(hex\)/{print $2; exit}')"
  fi
  if [[ "$hex" =~ ^[0-9a-fA-F]{40}$ ]]; then
    hex="0x$hex"
  fi
  printf '%s' "$hex"
}

# --- cast call helpers ---

# Normalize raw `cast call` output to a plain decimal string.
# Accepts a decimal token (first whitespace-delimited token of the first line)
# or a 0x hex value (converted via python3). Returns 1 when unparseable.
normalize_cast_uint256_output() {
  local s="${1:-}"
  s="${s//$'\r'/}"
  s="${s%%$'\n'*}"
  s="${s%% *}"
  s="${s//$'\t'/}"
  [[ "$s" =~ ^[0-9]+$ ]] && { printf '%s' "$s"; return 0; }
  if [[ "$s" =~ ^0x[0-9a-fA-F]+$ ]] && command -v python3 >/dev/null 2>&1; then
    python3 -c "print(int('$s',16))" 2>/dev/null && return 0
  fi
  return 1
}

# uint256 helpers for shell assertions.
# Both validate that operands are plain decimal; non-numeric input returns 1.
uint256_eq() {
  local a="${1:-0}" b="${2:-0}"
  [[ "$a" =~ ^[0-9]+$ ]] || return 1
  [[ "$b" =~ ^[0-9]+$ ]] || return 1
  [[ "$a" == "$b" ]]
}

uint256_gt() {
  local a="${1:-0}" b="${2:-0}"
  [[ "$a" =~ ^[0-9]+$ ]] || return 1
  [[ "$b" =~ ^[0-9]+$ ]] || return 1
  if command -v python3 >/dev/null 2>&1; then
    python3 -c "import sys; sys.exit(0 if int(sys.argv[1]) > int(sys.argv[2]) else 1)" "$a" "$b"
  else
    # Fallback lexical compare by length for big integers.
    # NOTE(review): correct only for canonical decimals (no leading zeros) — confirm inputs.
    (( ${#a} > ${#b} )) && return 0
    (( ${#a} < ${#b} )) && return 1
    [[ "$a" > "$b" ]]
  fi
}

# View call without extra args; returns decimal string or n/a.
# pool_evm_call_uint256 POOL RPC SIG
# Read a no-argument uint256 view on POOL via `cast call`; prints the decimal
# value, or "n/a" when POOL is empty or the call/normalization fails.
# Delegates to pool_evm_call_uint256_args (previously a verbatim copy of its
# body) so the cast/normalize logic lives in exactly one place.
pool_evm_call_uint256() {
  pool_evm_call_uint256_args "$1" "$2" "$3"
}

# pool_evm_call_uint256_args POOL RPC SIG [ARG...]
# Same as above, forwarding extra call arguments to `cast call`.
# Never returns non-zero: failures are reported as the literal "n/a" so
# callers interpolating the result into log lines don't abort under `set -e`.
pool_evm_call_uint256_args() {
  local pool="$1"
  local rpc="$2"
  local sig="$3"
  shift 3
  local raw norm
  # Empty pool address short-circuits: the contract is not deployed/resolved yet.
  [[ -z "$pool" ]] && { printf 'n/a'; return 0; }
  raw="$(cast call --rpc-url "$rpc" "$pool" "$sig" "$@" 2>/dev/null || true)"
  if norm="$(normalize_cast_uint256_output "$raw")"; then
    printf '%s' "$norm"
  else
    printf 'n/a'
  fi
}

# Normalize cast balance output to decimal wei (decimal or 0x hex).
# Unlike the uint256 helpers above, unrecognized non-empty input is passed
# through unchanged (not mapped to "n/a"); empty input yields "n/a".
normalize_cast_balance_wei() {
  local s="${1:-}"
  s="${s//$'\r'/}"
  s="${s%%$'\n'*}"
  s="${s//[[:space:]]/}"
  [[ -z "$s" ]] && { printf 'n/a'; return 0; }
  [[ "$s" =~ ^[0-9]+$ ]] && { printf '%s' "$s"; return 0; }
  if [[ "$s" =~ ^0x[0-9a-fA-F]+$ ]] && command -v python3 >/dev/null 2>&1; then
    python3 -c "print(int('$s',16))" 2>/dev/null && return 0
  fi
  printf '%s' "$s"
}

# Multi-line snapshot for E2E logs (native wallet, bond ERC20, pool unitsOf).
# Emit a tagged three-balance snapshot for one user — native wallet balance,
# bond ERC20 balance, and CommunityPool units — for eyeballing E2E progress.
log_user_withdraw_snapshot() {
  local rpc="$1" pool_evm="$2" bond_precompile="$3" user_addr="$4" name="$5" label="$6"
  local native_wei erc20_wei pool_units
  native_wei="$(normalize_cast_balance_wei "$(cast balance --rpc-url "$rpc" "$user_addr" 2>/dev/null || true)")"
  erc20_wei="$(pool_evm_call_uint256_args "$bond_precompile" "$rpc" "balanceOf(address)(uint256)" "$user_addr")"
  pool_units="$(pool_evm_call_uint256_args "$pool_evm" "$rpc" "unitsOf(address)(uint256)" "$user_addr")"
  echo " snapshot[$label] $name"
  echo " addr $user_addr"
  echo " liquid_native_wei $native_wei (EOA native balance)"
  echo " bond_token_wei $erc20_wei (bond ERC20 in wallet)"
  echo " pool_units $pool_units (CommunityPool unitsOf)"
}

# --- cast send: wait for nonce, parse JSON receipt, check status ---

# Block (up to DEADLINE seconds, default 45) until the key's pending nonce
# matches its latest nonce, i.e. no in-flight tx remains. Always returns 0:
# unreadable nonces and timeouts fall through so the caller may proceed.
wait_evm_nonce_settled_for_pk() {
  local pk="$1" rpc="$2" deadline_sec="${3:-45}"
  local addr pending latest t0
  addr="$(cast wallet address --private-key "$pk")"
  t0="$(date +%s)"
  while :; do
    pending="$(cast nonce --rpc-url "$rpc" --block pending "$addr" 2>/dev/null || true)"
    latest="$(cast nonce --rpc-url "$rpc" --block latest "$addr" 2>/dev/null || true)"
    # A failed read or a settled account both end the wait successfully.
    if [[ -z "$pending" || -z "$latest" || "$pending" == "$latest" ]]; then
      return 0
    fi
    if (( $(date +%s) - t0 > deadline_sec )); then
      return 0
    fi
    sleep 1
  done
}

# Receipt .status is 0x1 / 0x01 when successful.
cast_receipt_success() {
  local status
  status="$(echo "$1" | jq -r '.status // empty')"
  case "$status" in
    0x1|0x01) return 0 ;;
    *) return 1 ;;
  esac
}

# cast --json stdout may include non-JSON lines before the receipt; decode first JSON value from first '{'.
# Extract the first JSON object from `cast send --json` stdout, which may be
# preceded by non-JSON log lines. Prefers python3 (raw_decode from the first
# '{'); the shell fallback now also takes the FIRST line starting with '{' —
# previously it kept the LAST such line, disagreeing with the python path.
cast_stdout_to_receipt_json() {
  local raw="$1"
  if command -v python3 >/dev/null 2>&1; then
    python3 -c "
import json, sys
s = sys.stdin.read()
start = s.find('{')
if start < 0:
    sys.exit(1)
obj, _ = json.JSONDecoder().raw_decode(s[start:])
sys.stdout.write(json.dumps(obj))
" <<<"$raw" 2>/dev/null && return 0
  fi
  # python3-less fallback: first line beginning with '{' wins.
  local line
  while IFS= read -r line || [[ -n "$line" ]]; do
    if [[ "$line" =~ ^\{ ]]; then
      printf '%s' "$line"
      return 0
    fi
  done <<<"$raw"
  return 1
}

# Send a tx and require a successful (status 0x1/0x01) receipt.
# Usage: cast_send_expect_success RPC PK TARGET SIG [ARG...]
# Honors CAST_SEND_GAS_LIMIT when set; waits for the sender's nonce to settle
# first so sequential sends do not race. Prints diagnostics to stderr and
# returns 1 on send failure, unparseable receipt, or revert.
cast_send_expect_success() {
  local rpc="$1"
  local pk="$2"
  local target="$3"
  local sig="$4"
  shift 4
  local errf raw json
  wait_evm_nonce_settled_for_pk "$pk" "$rpc" 45
  errf="$(mktemp -t cast_ok.XXXXXX)"
  if [[ -n "${CAST_SEND_GAS_LIMIT:-}" ]]; then
    raw="$(cast send --json --rpc-url "$rpc" --private-key "$pk" --gas-limit "${CAST_SEND_GAS_LIMIT}" "$target" "$sig" "$@" 2>"$errf")" || {
      cat "$errf" >&2
      rm -f "$errf"
      return 1
    }
  else
    raw="$(cast send --json --rpc-url "$rpc" --private-key "$pk" "$target" "$sig" "$@" 2>"$errf")" || {
      cat "$errf" >&2
      rm -f "$errf"
      return 1
    }
  fi
  rm -f "$errf"
  if ! json="$(cast_stdout_to_receipt_json "$raw")"; then
    echo "error: cast send did not return parseable JSON receipt (target=$target sig=$sig)" >&2
    echo "$raw" >&2
    return 1
  fi
  if ! echo "$json" | jq -e '.status' >/dev/null 2>&1; then
    echo "error: receipt JSON missing .status (target=$target sig=$sig)" >&2
    echo "$json" >&2
    return 1
  fi
  if cast_receipt_success "$json"; then
    return 0
  fi
  echo "error: transaction reverted (status=$(echo "$json" | jq -r .status)) target=$target sig=$sig" >&2
  return 1
}

# Expect a tx to revert (receipt status != 0x1).
# Send a tx and require that it reverts (receipt status != 0x1).
# Usage: cast_send_expect_revert RPC PK TARGET SIG [ARG...]
# Mirrors cast_send_expect_success, except that a cast-level
# "execution reverted" / "transaction reverted" error (non-zero exit with no
# JSON receipt) also counts as the expected revert.
cast_send_expect_revert() {
  local rpc="$1"
  local pk="$2"
  local target="$3"
  local sig="$4"
  shift 4
  local errf out receipt status stderr_text
  wait_evm_nonce_settled_for_pk "$pk" "$rpc" 45
  errf="$(mktemp -t cast_revert.XXXXXX)"
  if [[ -n "${CAST_SEND_GAS_LIMIT:-}" ]]; then
    out="$(cast send --json --rpc-url "$rpc" --private-key "$pk" --gas-limit "${CAST_SEND_GAS_LIMIT}" "$target" "$sig" "$@" 2>"$errf")" || true
  else
    out="$(cast send --json --rpc-url "$rpc" --private-key "$pk" "$target" "$sig" "$@" 2>"$errf")" || true
  fi
  stderr_text="$(<"$errf")"
  rm -f "$errf"
  # cast can return non-zero with "execution reverted" and no JSON receipt.
  # For expect-revert assertions this should be considered success.
  case "$stderr_text" in
    *"execution reverted"* | *"transaction reverted"*)
      return 0
      ;;
  esac
  if ! receipt="$(cast_stdout_to_receipt_json "$out")"; then
    echo "error: cast send did not return parseable JSON receipt for expect-revert (target=$target sig=$sig)" >&2
    [[ -n "$out" ]] && echo "$out" >&2
    [[ -n "$stderr_text" ]] && echo "$stderr_text" >&2
    return 1
  fi
  status="$(echo "$receipt" | jq -r '.status // empty')"
  if [[ "$status" == "0x1" || "$status" == "0x01" ]]; then
    echo "error: expected revert but transaction succeeded target=$target sig=$sig" >&2
    return 1
  fi
  return 0
}

# Standard pool onboarding: approve bond for pool, then deposit(uint256).
approve_and_deposit() {
  local pk="$1" pool_evm="$2" bond_precompile="$3" amount="$4" rpc="$5"
  cast_send_expect_success "$rpc" "$pk" "$bond_precompile" \
    "approve(address,uint256)" "$pool_evm" "$amount" || return 1
  cast_send_expect_success "$rpc" "$pk" "$pool_evm" "deposit(uint256)" "$amount"
}

# staking params unbonding_time string (e.g. 30s, 1h30m) → integer seconds.
# Parse a staking unbonding_time duration string into whole seconds.
# Recognizes h/m/s components (e.g. "30s", "1h30m"); empty or unmatched input
# yields 30. Without python3 the value is ignored and 30 is always returned.
# NOTE(review): the regex reads "500ms" as 500 minutes plus a stray "s" —
# confirm inputs never use sub-second units.
parse_unbonding_seconds() {
  local raw="${1:-30s}"
  if command -v python3 >/dev/null 2>&1; then
    python3 -c "
import re, sys
raw = sys.argv[1].strip()
if not raw:
    print(30)
    sys.exit(0)
secs = 0
for n, u in re.findall(r'(\d+)(h|m|s)', raw):
    n = int(n)
    if u == 'h': secs += n * 3600
    elif u == 'm': secs += n * 60
    else: secs += n
print(secs if secs else 30)
" "$raw" 2>/dev/null && return 0
  fi
  echo 30
}
diff --git a/tests/e2e/poolrebalancer/rebalance_scenario_runner.sh b/tests/e2e/poolrebalancer/rebalance_scenario_runner.sh
new file mode 100755
index 00000000..64a8199a
--- /dev/null
+++ b/tests/e2e/poolrebalancer/rebalance_scenario_runner.sh
#!/usr/bin/env bash
set -euo pipefail

# rebalance_scenario_runner.sh — local E2E for x/poolrebalancer with a CommunityPool contract delegator.
# Interactive / manual inspection (pending queues, watch modes), not a deterministic CI harness.
#
# Scenarios: happy_path | caps | threshold_boundary | expansion
# Watch: watch (queues + pool reads)
# user_flow_multikey: CommunityPool multi-account E2E (user_flow_multikey.sh); see usage() "user_flow_multikey — what it is for"
#
# Caveat: user-withdraw undelegation maturity follows wall clock (header time vs completion), not block height.
# ============================================================================

# Repo root, resolved relative to this script so it works from any CWD.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
BASEDIR="${BASEDIR:-"$HOME/.og-evm-devnet"}"
NODE_RPC="${NODE_RPC:-"tcp://127.0.0.1:26657"}"
CHAIN_ID="${CHAIN_ID:-10740}"
KEYRING="${KEYRING:-test}"
HOME0="$BASEDIR/val0"
CHAIN_HOME="${CHAIN_HOME:-$BASEDIR}"
POOL_DELEGATOR_MODE="${POOL_DELEGATOR_MODE:-contract}"
# Well-known devnet keys (not secrets) used to drive the pool contract.
POOL_OWNER_PK="${POOL_OWNER_PK:-0x88cbead91aee890d27bf06e003ade3d4e952427e88f88d31d61d3ef5e5d54305}" # gitleaks:allow
POOL_DEPOSITOR_PK="${POOL_DEPOSITOR_PK:-0x741de4f8988ea941d3ff0287911ca4074e62b7d45c991a51186455366f10b544}" # gitleaks:allow
MODULE_EVM="${MODULE_EVM:-0x786c305E2aAc2168BB7555Ef522c5F20a2cd0dA9}"
BOND_PRECOMPILE="${BOND_PRECOMPILE:-0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE}"
POOL_CONTRACT_ADDR="${POOL_CONTRACT_ADDR:-}"
GOV_FROM="${GOV_FROM:-mykey}"
GOV_HOME="${GOV_HOME:-}"
GOV_DEPOSIT="${GOV_DEPOSIT:-10000000ogwei}"
GOV_FEES="${GOV_FEES:-400000000000000ogwei}"
GOV_WAIT_INITIAL="${GOV_WAIT_INITIAL:-20}"
GOV_POLL_TIMEOUT="${GOV_POLL_TIMEOUT:-20}"
GOV_STATUS_TIMEOUT="${GOV_STATUS_TIMEOUT:-120}"
EVM_RPC="${EVM_RPC:-http://127.0.0.1:8545}"
# NOTE(review): this default runs before the USER_SET_* capture below, so
# "${POOL_SEED_DEPOSIT_AMOUNT+x}" is always set there and
# USER_SET_POOL_SEED_DEPOSIT_AMOUNT can never be false — confirm intent.
POOL_SEED_DEPOSIT_AMOUNT="${POOL_SEED_DEPOSIT_AMOUNT:-200000000000000000000}"
DEFAULT_POOL_OWNER_PK="0x88cbead91aee890d27bf06e003ade3d4e952427e88f88d31d61d3ef5e5d54305"
DEFAULT_POOL_DEPOSITOR_PK="0x741de4f8988ea941d3ff0287911ca4074e62b7d45c991a51186455366f10b544"

# -----------------------------------------------------------------------------
# Runtime knobs (env vars take precedence)
# -----------------------------------------------------------------------------
# Track which knobs were explicitly provided via environment so scenario defaults
# can apply only when not set by the user.
# Capture "was this knob set by the user?" BEFORE the ${VAR:-default}
# assignments below: the ${VAR+x} tests only work while the variables are
# still unset. Do not reorder these against the default assignments.
USER_SET_MAX_TARGET_VALIDATORS=false
[[ -n "${POOLREBALANCER_MAX_TARGET_VALIDATORS+x}" ]] && USER_SET_MAX_TARGET_VALIDATORS=true
USER_SET_THRESHOLD_BP=false
[[ -n "${POOLREBALANCER_THRESHOLD_BP+x}" ]] && USER_SET_THRESHOLD_BP=true
USER_SET_MAX_OPS_PER_BLOCK=false
[[ -n "${POOLREBALANCER_MAX_OPS_PER_BLOCK+x}" ]] && USER_SET_MAX_OPS_PER_BLOCK=true
USER_SET_MAX_MOVE_PER_OP=false
[[ -n "${POOLREBALANCER_MAX_MOVE_PER_OP+x}" ]] && USER_SET_MAX_MOVE_PER_OP=true
USER_SET_STAKING_MAX_ENTRIES=false
[[ -n "${STAKING_MAX_ENTRIES+x}" ]] && USER_SET_STAKING_MAX_ENTRIES=true
USER_SET_IMBALANCE_MINOR_DELEGATION=false
[[ -n "${IMBALANCE_MINOR_DELEGATION+x}" ]] && USER_SET_IMBALANCE_MINOR_DELEGATION=true
USER_SET_POOL_SEED_DEPOSIT_AMOUNT=false
# NOTE(review): POOL_SEED_DEPOSIT_AMOUNT is defaulted earlier in this file
# (before this block runs), so the +x test below is always true and this flag
# can never be false — confirm whether that is intended.
[[ -n "${POOL_SEED_DEPOSIT_AMOUNT+x}" ]] && USER_SET_POOL_SEED_DEPOSIT_AMOUNT=true
USER_SET_STAKING_UNBONDING_TIME=false
[[ -n "${STAKING_UNBONDING_TIME+x}" ]] && USER_SET_STAKING_UNBONDING_TIME=true
USER_SET_POLL_SAMPLES=false
[[ -n "${POLL_SAMPLES+x}" ]] && USER_SET_POLL_SAMPLES=true
USER_SET_POLL_SLEEP_SECS=false
[[ -n "${POLL_SLEEP_SECS+x}" ]] && USER_SET_POLL_SLEEP_SECS=true

SCENARIO="${SCENARIO:-happy_path}"
VALIDATOR_COUNT="${VALIDATOR_COUNT:-}"
POOLREBALANCER_MAX_TARGET_VALIDATORS="${POOLREBALANCER_MAX_TARGET_VALIDATORS:-3}"
# Demo profile controls default speed so users can observe behavior.
# slow = very gradual progress (good for watching)
# medium = balanced default for demo
# fast = converges quickly
DEMO_PROFILE="${DEMO_PROFILE:-medium}"
POOLREBALANCER_THRESHOLD_BP="${POOLREBALANCER_THRESHOLD_BP:-0}"
POOLREBALANCER_MAX_OPS_PER_BLOCK="${POOLREBALANCER_MAX_OPS_PER_BLOCK:-2}"
POOLREBALANCER_MAX_MOVE_PER_OP="${POOLREBALANCER_MAX_MOVE_PER_OP:-100000000000000000000}" # 1e20

# Tune staking params so maturity behavior is visible in test runs.
+STAKING_UNBONDING_TIME="${STAKING_UNBONDING_TIME:-30s}" +STAKING_MAX_ENTRIES="${STAKING_MAX_ENTRIES:-100}" + +TX_FEES="${TX_FEES:-200000000000000ogwei}" # denom will be rewritten after chain start + +# Seed amounts used to create a clear imbalance (safe with default dev funding). +IMBALANCE_MAIN_DELEGATION="${IMBALANCE_MAIN_DELEGATION:-200000000000000000000000ogwei}" # denom rewritten after chain start +IMBALANCE_MINOR_DELEGATION="${IMBALANCE_MINOR_DELEGATION:-100ogwei}" + +POLL_SAMPLES="${POLL_SAMPLES:-25}" +POLL_SLEEP_SECS="${POLL_SLEEP_SECS:-2}" +# Always-on observability/runtime behavior for CLI usage. +STREAM_VALIDATOR_LOGS="true" +KEEP_RUNNING="true" +WATCH_COMPACT="${WATCH_COMPACT:-false}" + +LOG_STREAM_PIDS=() +CURRENT_PHASE="init" +SETUP_STARTED="false" +POOL_DEL_ADDR="" +POOL_EVM_ADDR="" +RESOLVED_GOV_FROM="" +RESOLVED_GOV_HOME="" +WATCH_INITIAL_DELEGATIONS_LOGGED="false" +EXPANSION_MISSING_DSTS=() +EXPANSION_OBSERVED_DSTS_TEXT="" +EXPANSION_INITIAL_DELEGATED=() + +on_interrupt() { + echo + echo "==> Interrupt received, stopping test setup..." + # Stop child processes spawned by this script first. 
+ pkill -TERM -P "$$" >/dev/null 2>&1 || true + cleanup_on_exit + exit 130 +} + +cleanup_on_exit() { + cleanup_log_streams + if [[ "$SETUP_STARTED" == "true" ]]; then + stop_nodes + fi +} + +usage() { + cat < Number of validators/nodes to run + -s, --scenario Scenario name (same as SCENARIO env var) + -p, --profile Demo profile: slow|medium|fast + --stress-profile user_flow_multikey profile (100users|stress100) + --user-count user_flow_multikey USER_COUNT override + --withdraw-users user_flow_multikey WITHDRAW_USERS override + --flow-mode user_flow_multikey mode: serial|parallel + --deposit-concurrency user_flow_multikey deposit worker concurrency + --withdraw-concurrency user_flow_multikey withdraw submit worker concurrency + --claim-concurrency user_flow_multikey claimWithdraw worker concurrency + --claim-rewards-concurrency user_flow_multikey claimRewards worker concurrency + --batch-delay-ms user_flow_multikey inter-batch delay milliseconds + -h, --help Show this help + +Scenarios: + happy_path + Goal: baseline rebalance scheduling from a heavily skewed delegation. + Setup: contract-seeded skew (single-validator staking target + large deposit). + Params: uses baseline defaults (unless overridden by environment). + Watch for: pending redelegations to underweight validators. + + caps + Goal: verify scheduling respects max_ops_per_block and max_move_per_op. + Setup: same contract-seeded skew as happy_path, but with tight scheduling caps. + Params: default poolrebalancer max_ops_per_block=1, max_move_per_op=1e18. + Watch for: capped move sizes and slower progression. + + threshold_boundary + Goal: verify tiny drift is ignored when threshold is high enough. + Setup: seed a small contract deposit with single-validator target. + Params: default poolrebalancer rebalance_threshold_bp=5000. + Watch for: little or no scheduling when drift stays below threshold. 
+ + expansion + Goal: verify the target validator set can expand to bonded validators outside the initial seed set. + Setup: five validators; pool stakes from contract across three validators only (main + two minor deposits). + Params: max_target_validators=5, max_ops_per_block=1, max_move_per_op=1e19, larger minor seed amount. + Watch for: redelegations with dst outside the initial three-validator delegation set (expansion_progress in logs). + +Profiles: + slow max_ops_per_block=1, capped move per op + medium default balancing profile + fast more ops per block, no move cap + +Environment variables: + BASEDIR Test chain base dir (default: $HOME/.og-evm-devnet) + NODE_RPC RPC endpoint (default: tcp://127.0.0.1:26657) + CHAIN_ID Chain ID (default: 10740) + TX_FEES Fees for txs (default: $TX_FEES) + CHAIN_HOME Home for governance tx signer (default: $CHAIN_HOME) + + VAL0_MNEMONIC ... VALN_MNEMONIC Optional explicit mnemonics. Any missing values are auto-generated. + + POOLREBALANCER_MAX_TARGET_VALIDATORS + SCENARIO happy_path|caps|threshold_boundary|expansion + (aliases: baseline_3val, max_target_gt_bonded_3val, + target_set_expansion_5val — normalized in apply_scenario_defaults) + VALIDATOR_COUNT Validators to start (default 3; expansion defaults to 5 if unset) + DEMO_PROFILE slow|medium|fast tuning for rebalance visibility (default: medium) + POOLREBALANCER_THRESHOLD_BP + POOLREBALANCER_MAX_OPS_PER_BLOCK + POOLREBALANCER_MAX_MOVE_PER_OP + POOL_DELEGATOR_MODE contract|eoa (default: contract) + POOL_OWNER_PK Private key for deploying/configuring CommunityPool + POOL_DEPOSITOR_PK Private key for CommunityPool deposit txs during seeding + MODULE_EVM Poolrebalancer module EVM address + BOND_PRECOMPILE Bond precompile address + EVM_RPC EVM RPC endpoint for cast calls (default: http://127.0.0.1:8545) + POOL_CONTRACT_ADDR Optional existing CommunityPool EVM address to reuse + POOL_SEED_DEPOSIT_AMOUNT Main contract seed amount (raw uint, default: 200000000000000000000) + 
GOV_FROM Key name used for gov submit/vote (default: mykey) + GOV_HOME Home for gov signer keyring (auto-detected if empty) + GOV_DEPOSIT Gov proposal deposit (default: 10000000ogwei) + GOV_FEES Fees for gov submit/vote txs (default: 400000000000000ogwei) + GOV_WAIT_INITIAL Initial wait before checking proposal status (default: 20) + GOV_POLL_TIMEOUT Timeout waiting for params propagation seconds (default: 20) + GOV_STATUS_TIMEOUT Timeout waiting proposal to pass (default: 120) + + USER_FLOW_POOL_DELEGATOR_POLL_INTERVAL_SECS user_flow_multikey: seconds between checks when chain is up but address empty (default: 40) + USER_FLOW_CHAIN_NOT_READY_POLL_INTERVAL_SECS user_flow_multikey: poll while evmd/not-ready or query errors (default: 5) + USER_FLOW_POOL_DELEGATOR_MAX_WAIT_SECS user_flow_multikey: give up after this many seconds (default: 0 = no limit) + + COMMUNITY_POOL_EDGE_PHASES community_pool_edge_cases: comma-separated phases (default: auth if unset; or pass positional arg / all) + AUTH_NON_OWNER_ACCOUNT community_pool_edge_cases: dev account name for non-owner txs (default: dev1) + DRIFT_SKEW_WEI community_pool_edge_cases: added to totalStaked for drift test (default: 1e18 wei) + DRIFT_RECOVER_MAX_WAIT_SECS community_pool_edge_cases: wait for reconcile to match staking (default: 180) + POOL_DEL_BECH32 community_pool_edge_cases: optional pool delegator bech32 override + WITHDRAW_SIZING_ACCOUNT community_pool_edge_cases: dev account with LP units for withdraw test (default: dev2) + WITHDRAW_SIZING_FRACTION_BP community_pool_edge_cases: basis points of user units to withdraw (default: 1000 = 10%) + WITHDRAW_SIZING_CANDIDATE_BP_LIST ordered fallback BPs when withdraw reverts (default: 1000,500,200,100,50,20,10,5,1) + WITHDRAW_SIZING_PENDING_RESERVE_POLL_SECS optional wait for pendingRebalanceUnbondReserve>0 (default: 60; 0=skip) + WITHDRAW_SIZING_GAS_LIMIT gas limit for withdraw() tx (default: 9500000) + WITHDRAW_SIZING_AUTO_DEPOSIT if 1, auto approve+deposit when 
withdraw_sizing preconditions are missing (default: 1) + WITHDRAW_SIZING_AUTO_DEPOSIT_USERS number of dev accounts to auto-deposit from (dev0..devN-1, default: 3) + WITHDRAW_SIZING_AUTO_DEPOSIT_AMOUNT_WEI auto-deposit amount per account (default: 100000000000000000000) + WITHDRAW_SIZING_AUTO_DEPOSIT_INTERVAL_SECS seconds between auto-deposit txs (default: 1) + WITHDRAW_SIZING_TOTAL_STAKED_WAIT_SECS wait after auto-deposit for totalStaked>0 (default: 120) + LIQUIDITY_ACCOUNT liquidity phase: dev account for withdraw+claim flow (default: WITHDRAW_SIZING_ACCOUNT) + LIQUIDITY_FRACTION_BP liquidity phase: basis points of user units to withdraw (default: WITHDRAW_SIZING_FRACTION_BP) + LIQUIDITY_CANDIDATE_BP_LIST liquidity phase: ordered fallback BPs when withdraw reverts + LIQUIDITY_GAS_LIMIT liquidity phase: gas limit for withdraw()/claimWithdraw() txs + LIQUIDITY_MATURITY_MAX_WAIT_SECS liquidity phase: max wall-clock wait for maturity in optional stress (default: 300) + CLAIM_STRESS_INSUFFICIENT_LIQUID liquidity phase: if 1, best-effort retry matured claimWithdraw (default: 0) + CLAIM_STRESS_MAX_ATTEMPTS liquidity phase: retries after maturity in optional stress (default: 20) + CLAIM_STRESS_POLL_INTERVAL_SECS liquidity phase: seconds between optional stress retries (default: 2) + DUST_ACCOUNT dust phase: primary dev account for seed deposit / withdraw(1) revert (default: dev1) + DUST_SECONDARY_ACCOUNT dust phase: secondary dev account for deposit(1) revert (default: dev2) + DUST_SEED_DEPOSIT_AMOUNT_WEI dust phase: seed deposit amount before no-op stake + dust reverts (default: 1e18) + DUST_BOUNDARY_MAX_VALIDATORS dust phase: valid boundary maxValidators used in setConfig (default: 1) + DUST_HIGH_MIN_STAKE_AMOUNT_WEI dust phase: elevated minStakeAmount used to force stake() no-op (default: uint256 max) + REWARDS_ACCOUNT rewards phase: dev account that calls claimRewards() (default: dev1) + REWARDS_HARVEST_COUNT rewards phase: number of owner harvest() calls (default: 3) 
+ REWARDS_HARVEST_INTERVAL_SECS rewards phase: sleep between harvests (default: 1) + REWARDS_REQUIRE_HARVEST_SUCCESS rewards phase: if 1, require harvest() to succeed; if 0, allow graceful skip (default: 0) + SKIP_EMPTY_POOL_HARVEST rewards phase: if 1, skip empty-pool harvest path with Forge pointer (default: 1) + + STAKING_UNBONDING_TIME Reduce so pending queues mature quickly (default: 30s) + STAKING_MAX_ENTRIES Raise/lower redelegation entry pressure (default: 100) + + POLL_SAMPLES Initial poll samples before giving up if no pending ops (default: 25) + POLL_SLEEP_SECS Seconds between samples (default: 2) + + IMBALANCE_MAIN_DELEGATION Large delegation to validator[0] + IMBALANCE_MINOR_DELEGATION Small delegations to validator[1], validator[2] + WATCH_COMPACT Compact lines for watch mode (default: false) + +Note: + Any variable set in the environment overrides scenario defaults when the script respects USER_SET_* flags. + +Examples: + # Standard rebalance flow + bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh --scenario happy_path --nodes 3 --profile medium + + # Cap-focused behavior + bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh --scenario caps --nodes 3 --profile slow + + # Threshold gating (expect no scheduling for small drift) + bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh --scenario threshold_boundary --nodes 3 + + # Target-set expansion (5 validators; pool initially delegated to 3) + bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh --scenario expansion --nodes 5 --profile medium + + bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh watch + + # After happy_path (or any scenario) has started the chain and deployed the pool: + bash tests/e2e/poolrebalancer/rebalance_scenario_runner.sh user_flow_multikey + +EOF +} + +# --- user_flow_multikey subcommand helpers (poll chain until pool is wired, then exec user_flow_multikey.sh) --- + +# First Error:/rpc line from evmd stderr (avoids dumping full Usage after 
failures).
_user_flow_evmd_error_summary() {
  # First matching rule wins: "Error:" line (prefix stripped), else
  # "rpc error:" line, else a non-empty first line.
  printf '%s\n' "$1" | awk '
  /^Error:/ { sub(/^Error:[[:space:]]*/, ""); print; exit }
  /^rpc error:/ { print; exit }
  NR==1 && length($0) { print }
  '
}

# Latest block height from the Tendermint /status endpoint ("" when unreachable).
_user_flow_tendermint_latest_height() {
  curl -sS --max-time 2 "$(tendermint_status_url)" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // empty' 2>/dev/null || echo ""
}

# Poll evmd query poolrebalancer params until pool_delegator_address is non-empty (or timeout).
# Shorter interval while RPC returns "not ready" / no first block; longer once chain serves queries but gov not done.
wait_for_pool_delegator_address_configured() {
  local ready_poll="${USER_FLOW_POOL_DELEGATOR_POLL_INTERVAL_SECS:-40}"
  local notready_poll="${USER_FLOW_CHAIN_NOT_READY_POLL_INTERVAL_SECS:-5}"
  local deadline="${USER_FLOW_POOL_DELEGATOR_MAX_WAIT_SECS:-0}"
  local started_at addr out next_sleep summary tip_height
  started_at="$(date +%s)"
  echo "==> Waiting for poolrebalancer.params.pool_delegator_address to be set (needed before user_flow_multikey.sh)"
  echo " NODE_RPC=$NODE_RPC"
  echo " After the chain is ready: poll every ${ready_poll}s if the address is still empty"
  echo " While evmd reports 'not ready' / no first block: poll every ${notready_poll}s (USER_FLOW_CHAIN_NOT_READY_POLL_INTERVAL_SECS)"
  if [[ "$deadline" =~ ^[0-9]+$ ]] && (( deadline > 0 )); then
    echo " Max wait ${deadline}s (unset USER_FLOW_POOL_DELEGATOR_MAX_WAIT_SECS or set 0 for no limit)"
  else
    echo " No max wait (interrupt with Ctrl+C); set USER_FLOW_POOL_DELEGATOR_MAX_WAIT_SECS to cap"
  fi
  while true; do
    addr=""
    out=""
    next_sleep="$ready_poll"
    tip_height="$(_user_flow_tendermint_latest_height)"
    # `if ! var=$(cmd)` keeps a failing query from tripping errexit.
    if ! out="$(evmd query poolrebalancer params --node "$NODE_RPC" -o json 2>&1)"; then
      summary="$(_user_flow_evmd_error_summary "$out")"
      if [[ "$out" == *"not ready"* ]] || [[ "$out" == *"first block"* ]] || [[ "$out" == *"invalid height"* ]]; then
        echo "==> ($(date -u +%Y-%m-%dT%H:%M:%SZ)) still waiting: evmd app not ready yet (ABCI queries blocked until the first block is committed)"
        echo " tendermint latest_block_height=${tip_height:-unknown} — start or wait for validators, then this will clear"
        echo " evmd: ${summary:0:240}"
      else
        echo "==> ($(date -u +%Y-%m-%dT%H:%M:%SZ)) still waiting: poolrebalancer query failed"
        echo " tendermint latest_block_height=${tip_height:-unknown}"
        echo " evmd: ${summary:0:240}"
      fi
      # Either failure mode means the chain/app is not serving yet: poll fast.
      next_sleep="$notready_poll"
    else
      addr="$(printf '%s\n' "$out" | jq -r '.params.pool_delegator_address // empty' 2>/dev/null || echo "")"
      if [[ -n "$addr" && "$addr" != "null" ]]; then
        echo "==> pool_delegator_address is set: $addr"
        return 0
      fi
      echo "==> ($(date -u +%Y-%m-%dT%H:%M:%SZ)) still waiting: pool_delegator_address is empty (chain is up — finish CommunityPool deploy + gov pool_delegator_address update)"
      next_sleep="$ready_poll"
    fi
    if [[ "$deadline" =~ ^[0-9]+$ ]] && (( deadline > 0 )); then
      if (( $(date +%s) - started_at >= deadline )); then
        echo "error: timed out after ${deadline}s waiting for pool_delegator_address" >&2
        return 1
      fi
    fi
    sleep "$next_sleep"
  done
}

# Preconditions: BASEDIR/dev_accounts.txt; chain up. Sets CHAIN_HOME=val0 for bech32 debug. Optional POOL_CONTRACT_ADDR skips wait.
run_user_flow_multikey_subcommand() {
  local script="$ROOT_DIR/tests/e2e/poolrebalancer/user_flow_multikey.sh"
  if [[ ! -f "$script" ]]; then
    echo "error: missing $script" >&2
    exit 1
  fi
  if [[ ! 
-f "$BASEDIR/dev_accounts.txt" ]]; then + echo "error: missing $BASEDIR/dev_accounts.txt" >&2 + echo "hint: start a devnet with this runner (or multi_node_startup) so dev accounts exist" >&2 + exit 1 + fi + # Runner defaults CHAIN_HOME to BASEDIR (repo root home); user_flow_multikey.sh expects val0 for evmd debug addr. + if [[ -z "${CHAIN_HOME:-}" ]] || [[ "${CHAIN_HOME}" == "${BASEDIR}" ]]; then + export CHAIN_HOME="$BASEDIR/val0" + fi + echo "==> user_flow_multikey: BASEDIR=$BASEDIR CHAIN_HOME=$CHAIN_HOME NODE_RPC=$NODE_RPC" + if [[ -n "${POOL_CONTRACT_ADDR:-}" ]]; then + echo "==> POOL_CONTRACT_ADDR is set; skipping wait for poolrebalancer.params.pool_delegator_address" + else + wait_for_pool_delegator_address_configured || exit 1 + fi + ensure_evm_rpc_ready || exit 1 + echo "==> EVM_RPC=$EVM_RPC — invoking user_flow_multikey.sh" + if [[ -n "${PARSED_USER_FLOW_STRESS_PROFILE:-}" ]]; then + export USER_FLOW_STRESS_PROFILE="$PARSED_USER_FLOW_STRESS_PROFILE" + echo "==> USER_FLOW_STRESS_PROFILE=$USER_FLOW_STRESS_PROFILE (from subcommand)" + fi + if [[ -n "${PARSED_USER_FLOW_USER_COUNT:-}" ]]; then + export USER_COUNT="$PARSED_USER_FLOW_USER_COUNT" + echo "==> USER_COUNT=$USER_COUNT (from subcommand)" + fi + if [[ -n "${PARSED_USER_FLOW_WITHDRAW_USERS:-}" ]]; then + export WITHDRAW_USERS="$PARSED_USER_FLOW_WITHDRAW_USERS" + echo "==> WITHDRAW_USERS=$WITHDRAW_USERS (from subcommand)" + fi + if [[ -n "${PARSED_USER_FLOW_MODE:-}" ]]; then + export USER_FLOW_MODE="$PARSED_USER_FLOW_MODE" + echo "==> USER_FLOW_MODE=$USER_FLOW_MODE (from subcommand)" + fi + if [[ -n "${PARSED_DEPOSIT_CONCURRENCY:-}" ]]; then + export DEPOSIT_CONCURRENCY="$PARSED_DEPOSIT_CONCURRENCY" + echo "==> DEPOSIT_CONCURRENCY=$DEPOSIT_CONCURRENCY (from subcommand)" + fi + if [[ -n "${PARSED_WITHDRAW_CONCURRENCY:-}" ]]; then + export WITHDRAW_CONCURRENCY="$PARSED_WITHDRAW_CONCURRENCY" + echo "==> WITHDRAW_CONCURRENCY=$WITHDRAW_CONCURRENCY (from subcommand)" + fi + if [[ -n 
"${PARSED_CLAIM_CONCURRENCY:-}" ]]; then + export CLAIM_CONCURRENCY="$PARSED_CLAIM_CONCURRENCY" + echo "==> CLAIM_CONCURRENCY=$CLAIM_CONCURRENCY (from subcommand)" + fi + if [[ -n "${PARSED_CLAIM_REWARDS_CONCURRENCY:-}" ]]; then + export CLAIM_REWARDS_CONCURRENCY="$PARSED_CLAIM_REWARDS_CONCURRENCY" + echo "==> CLAIM_REWARDS_CONCURRENCY=$CLAIM_REWARDS_CONCURRENCY (from subcommand)" + fi + if [[ -n "${PARSED_BATCH_DELAY_MS:-}" ]]; then + export BATCH_DELAY_MS="$PARSED_BATCH_DELAY_MS" + echo "==> BATCH_DELAY_MS=$BATCH_DELAY_MS (from subcommand)" + fi + bash "$script" +} + +user_flow_chain_ready() { + local status_url h + status_url="$(tendermint_status_url)" + h="$(curl -sS --max-time 1 "$status_url" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null || echo "0")" + [[ "$h" =~ ^[0-9]+$ ]] || h=0 + (( h > 0 )) +} + +user_flow_pool_delegator_ready() { + local out del + out="$(evmd query poolrebalancer params --node "$NODE_RPC" -o json 2>/dev/null || true)" + del="$(echo "$out" | jq -r '.params.pool_delegator_address // empty' 2>/dev/null || true)" + [[ -n "$del" ]] +} + +# Preconditions: BASEDIR/dev_accounts.txt; chain up. Optional POOL_CONTRACT_ADDR skips wait. +run_community_pool_edge_cases_subcommand() { + local script="$ROOT_DIR/tests/e2e/poolrebalancer/community_pool_edge_cases.sh" + if [[ ! -f "$script" ]]; then + echo "error: missing $script" >&2 + exit 1 + fi + if [[ ! 
-f "$BASEDIR/dev_accounts.txt" ]]; then + echo "error: missing $BASEDIR/dev_accounts.txt" >&2 + echo "hint: start a devnet with this runner (or multi_node_startup) so dev accounts exist" >&2 + exit 1 + fi + if [[ -z "${CHAIN_HOME:-}" ]] || [[ "${CHAIN_HOME}" == "${BASEDIR}" ]]; then + export CHAIN_HOME="$BASEDIR/val0" + fi + echo "==> community_pool_edge_cases: BASEDIR=$BASEDIR CHAIN_HOME=$CHAIN_HOME NODE_RPC=$NODE_RPC" + if [[ -n "${POOL_CONTRACT_ADDR:-}" ]]; then + echo "==> POOL_CONTRACT_ADDR is set; skipping wait for poolrebalancer.params.pool_delegator_address" + else + wait_for_pool_delegator_address_configured || exit 1 + fi + ensure_evm_rpc_ready || exit 1 + echo "==> EVM_RPC=$EVM_RPC — invoking community_pool_edge_cases.sh" + if [[ -n "${PARSED_COMMUNITY_POOL_EDGE_PHASES:-}" ]]; then + if [[ "$PARSED_COMMUNITY_POOL_EDGE_PHASES" == "all" ]]; then + export COMMUNITY_POOL_EDGE_PHASES="auth,drift,withdraw_sizing,liquidity,dust,rewards" + else + export COMMUNITY_POOL_EDGE_PHASES="$PARSED_COMMUNITY_POOL_EDGE_PHASES" + fi + echo "==> COMMUNITY_POOL_EDGE_PHASES=$COMMUNITY_POOL_EDGE_PHASES (from subcommand)" + fi + bash "$script" +} + +parse_cli_args() { + local subcommand="" + PARSED_COMMUNITY_POOL_EDGE_PHASES="" + PARSED_USER_FLOW_STRESS_PROFILE="" + PARSED_USER_FLOW_USER_COUNT="" + PARSED_USER_FLOW_WITHDRAW_USERS="" + PARSED_USER_FLOW_MODE="" + PARSED_DEPOSIT_CONCURRENCY="" + PARSED_WITHDRAW_CONCURRENCY="" + PARSED_CLAIM_CONCURRENCY="" + PARSED_CLAIM_REWARDS_CONCURRENCY="" + PARSED_BATCH_DELAY_MS="" + while [[ $# -gt 0 ]]; do + case "$1" in + watch) + subcommand="watch" + shift + ;; + help) + subcommand="help" + shift + ;; + user_flow_multikey) + subcommand="user_flow_multikey" + shift + ;; + community_pool_edge_cases) + subcommand="community_pool_edge_cases" + shift + ;; + -n|--nodes) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + VALIDATOR_COUNT="$2" + shift 2 + ;; + -s|--scenario) + if [[ $# -lt 2 ]]; then + echo "missing value 
for $1" >&2 + exit 1 + fi + SCENARIO="$2" + shift 2 + ;; + -p|--profile) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + DEMO_PROFILE="$2" + shift 2 + ;; + -h|--help) + subcommand="help" + shift + ;; + --stress-profile) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + PARSED_USER_FLOW_STRESS_PROFILE="$2" + shift 2 + ;; + --user-count) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + if [[ ! "$2" =~ ^[0-9]+$ ]] || (( "$2" < 1 )); then + echo "error: --user-count must be a positive integer (got: $2)" >&2 + exit 1 + fi + PARSED_USER_FLOW_USER_COUNT="$2" + shift 2 + ;; + --withdraw-users) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + if [[ ! "$2" =~ ^[0-9]+$ ]]; then + echo "error: --withdraw-users must be a non-negative integer (got: $2)" >&2 + exit 1 + fi + PARSED_USER_FLOW_WITHDRAW_USERS="$2" + shift 2 + ;; + --flow-mode) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + if [[ "$2" != "serial" && "$2" != "parallel" ]]; then + echo "error: --flow-mode must be serial or parallel (got: $2)" >&2 + exit 1 + fi + PARSED_USER_FLOW_MODE="$2" + shift 2 + ;; + --deposit-concurrency) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + if [[ ! "$2" =~ ^[0-9]+$ ]] || (( "$2" < 1 )); then + echo "error: --deposit-concurrency must be a positive integer (got: $2)" >&2 + exit 1 + fi + PARSED_DEPOSIT_CONCURRENCY="$2" + shift 2 + ;; + --withdraw-concurrency) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + if [[ ! "$2" =~ ^[0-9]+$ ]] || (( "$2" < 1 )); then + echo "error: --withdraw-concurrency must be a positive integer (got: $2)" >&2 + exit 1 + fi + PARSED_WITHDRAW_CONCURRENCY="$2" + shift 2 + ;; + --claim-concurrency) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + if [[ ! 
"$2" =~ ^[0-9]+$ ]] || (( "$2" < 1 )); then + echo "error: --claim-concurrency must be a positive integer (got: $2)" >&2 + exit 1 + fi + PARSED_CLAIM_CONCURRENCY="$2" + shift 2 + ;; + --claim-rewards-concurrency) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + if [[ ! "$2" =~ ^[0-9]+$ ]] || (( "$2" < 1 )); then + echo "error: --claim-rewards-concurrency must be a positive integer (got: $2)" >&2 + exit 1 + fi + PARSED_CLAIM_REWARDS_CONCURRENCY="$2" + shift 2 + ;; + --batch-delay-ms) + if [[ $# -lt 2 ]]; then + echo "missing value for $1" >&2 + exit 1 + fi + if [[ ! "$2" =~ ^[0-9]+$ ]]; then + echo "error: --batch-delay-ms must be a non-negative integer (got: $2)" >&2 + exit 1 + fi + PARSED_BATCH_DELAY_MS="$2" + shift 2 + ;; + --) + shift + break + ;; + run) + # Explicit no-op command for readability. + shift + ;; + *) + if [[ "$subcommand" == "community_pool_edge_cases" && -z "${PARSED_COMMUNITY_POOL_EDGE_PHASES:-}" ]]; then + PARSED_COMMUNITY_POOL_EDGE_PHASES="$1" + shift + else + echo "unknown argument: $1" >&2 + return 1 + fi + ;; + esac + done + PARSED_SUBCOMMAND="$subcommand" + return 0 +} + +require_bin() { + command -v "$1" >/dev/null 2>&1 || { echo "missing dependency: $1" >&2; exit 1; } +} + +stop_nodes() { + # Aggressive cleanup: multi_node_startup.sh launches `evmd start` processes directly. + pkill -f "evmd start" >/dev/null 2>&1 || true + pkill -f "multi_node_startup.sh" >/dev/null 2>&1 || true + # Give the OS a moment to release RPC/P2P ports. 
+ sleep 1 +} + +cleanup_log_streams() { + if (( ${#LOG_STREAM_PIDS[@]} == 0 )); then + return 0 + fi + for pid in "${LOG_STREAM_PIDS[@]}"; do + kill "$pid" >/dev/null 2>&1 || true + done + LOG_STREAM_PIDS=() +} + +start_validator_log_streams() { + mkdir -p "$BASEDIR/logs" + for v in $(seq 0 $((VALIDATOR_COUNT - 1))); do + local f="$BASEDIR/logs/val${v}.log" + touch "$f" + tail -n 0 -F "$f" | sed -u "s/^/[val${v}] /" & + LOG_STREAM_PIDS+=("$!") + done +} + +wait_for_height() { + local timeout_secs="${1:-30}" + local start status_url + status_url="$(tendermint_status_url)" + start="$(date +%s)" + while true; do + local h + h="$(curl -sS --max-time 1 "$status_url" 2>/dev/null | jq -r '.result.sync_info.latest_block_height' 2>/dev/null || echo 0)" + if [[ "$h" != "0" ]]; then + echo "$h" + return 0 + fi + if (( $(date +%s) - start > timeout_secs )); then + echo "timed out waiting for height > 0" >&2 + return 1 + fi + sleep 1 + done +} + +tendermint_status_url() { + local hp="${NODE_RPC#tcp://}" + hp="${hp#http://}" + hp="${hp#https://}" + printf 'http://%s/status' "$hp" +} + +derive_evm_rpc_from_node_rpc() { + local node="$1" + local hostport host port idx jsonrpc_port + hostport="${node#tcp://}" + hostport="${hostport#http://}" + hostport="${hostport#https://}" + host="${hostport%%:*}" + port="${hostport##*:}" + if [[ "$port" =~ ^[0-9]+$ ]] && (( port >= 26657 )) && (( (port - 26657) % 100 == 0 )); then + idx=$(( (port - 26657) / 100 )) + jsonrpc_port=$((8545 + (idx * 10))) + printf 'http://%s:%s' "$host" "$jsonrpc_port" + return 0 + fi + return 1 +} + +ensure_evm_rpc_ready() { + CURRENT_PHASE="ensure_evm_rpc_ready" + local candidates=() + local derived="" + local c start now + + candidates+=("$EVM_RPC") + if derived="$(derive_evm_rpc_from_node_rpc "$NODE_RPC" 2>/dev/null || true)"; then + candidates+=("$derived") + fi + candidates+=("http://127.0.0.1:8545" "http://127.0.0.1:8555" "http://127.0.0.1:8565" "http://127.0.0.1:8575") + + echo "==> Waiting for EVM RPC 
readiness" + start="$(date +%s)" + while true; do + for c in "${candidates[@]}"; do + [[ -z "$c" ]] && continue + if cast chain-id --rpc-url "$c" >/dev/null 2>&1; then + if [[ "$EVM_RPC" != "$c" ]]; then + echo "==> Using detected EVM RPC endpoint: $c" + fi + EVM_RPC="$c" + return 0 + fi + done + now="$(date +%s)" + if (( now - start > 90 )); then + echo "error: no reachable EVM RPC endpoint found after 90s" >&2 + echo "tried: ${candidates[*]}" >&2 + return 1 + fi + sleep 2 + done +} + +dev_account_private_key_from_file() { + local account_name="$1" + local f="$BASEDIR/dev_accounts.txt" + [[ -f "$f" ]] || return 1 + awk -v name="$account_name" ' + $0 ~ ("^" name ":") {in_block=1; next} + in_block && $1=="private_key:" {print $2; exit} + in_block && /^[^[:space:]]/ {in_block=0} + ' "$f" +} + +resolve_pool_runtime_keys() { + if [[ "$POOL_DELEGATOR_MODE" != "contract" ]]; then + return 0 + fi + local dev0_pk dev1_pk + if [[ "$POOL_OWNER_PK" == "$DEFAULT_POOL_OWNER_PK" ]]; then + dev0_pk="$(dev_account_private_key_from_file "dev0" || true)" + if [[ -n "$dev0_pk" ]]; then + POOL_OWNER_PK="$dev0_pk" + echo "==> Using generated dev0 private key as POOL_OWNER_PK" + fi + fi + if [[ "$POOL_DEPOSITOR_PK" == "$DEFAULT_POOL_DEPOSITOR_PK" ]]; then + dev1_pk="$(dev_account_private_key_from_file "dev1" || true)" + if [[ -n "$dev1_pk" ]]; then + POOL_DEPOSITOR_PK="$dev1_pk" + echo "==> Using generated dev1 private key as POOL_DEPOSITOR_PK" + fi + fi +} + +resolve_governance_signer() { + CURRENT_PHASE="resolve_governance_signer" + local requested_home candidate_from candidate_home + requested_home="${GOV_HOME:-$CHAIN_HOME}" + + # 1) honor configured GOV_FROM when available + if evmd keys show "$GOV_FROM" --keyring-backend "$KEYRING" --home "$requested_home" >/dev/null 2>&1; then + RESOLVED_GOV_FROM="$GOV_FROM" + RESOLVED_GOV_HOME="$requested_home" + echo "==> Using configured governance signer: from=$RESOLVED_GOV_FROM home=$RESOLVED_GOV_HOME" + return 0 + fi + + # 2) fallback to 
val0 in validator 0 home (multi_node_startup default key naming) + candidate_from="val0" + candidate_home="$HOME0" + if evmd keys show "$candidate_from" --keyring-backend "$KEYRING" --home "$candidate_home" >/dev/null 2>&1; then + RESOLVED_GOV_FROM="$candidate_from" + RESOLVED_GOV_HOME="$candidate_home" + echo "==> Falling back to governance signer: from=$RESOLVED_GOV_FROM home=$RESOLVED_GOV_HOME" + return 0 + fi + + # 3) fallback to mykey in CHAIN_HOME for local_node-style setups + candidate_from="mykey" + candidate_home="$CHAIN_HOME" + if evmd keys show "$candidate_from" --keyring-backend "$KEYRING" --home "$candidate_home" >/dev/null 2>&1; then + RESOLVED_GOV_FROM="$candidate_from" + RESOLVED_GOV_HOME="$candidate_home" + echo "==> Falling back to governance signer: from=$RESOLVED_GOV_FROM home=$RESOLVED_GOV_HOME" + return 0 + fi + + echo "error: could not resolve a governance signer key for submit/vote" >&2 + echo "tried: GOV_FROM=$GOV_FROM home=$requested_home, val0@$HOME0, mykey@$CHAIN_HOME" >&2 + return 1 +} + +vote_proposal_with_validator_majority() { + local proposal_id="$1" + local success_votes=0 + local required_votes=$((VALIDATOR_COUNT / 2 + 1)) + local i from home vote_out vote_code vote_log + + echo "==> Voting proposal_id=$proposal_id with validator keys (target yes votes: $required_votes/$VALIDATOR_COUNT)" + for i in $(seq 0 $((VALIDATOR_COUNT - 1))); do + from="val${i}" + home="$BASEDIR/val${i}" + if ! 
evmd keys show "$from" --keyring-backend "$KEYRING" --home "$home" >/dev/null 2>&1; then
      continue
    fi
    vote_out="$(evmd tx gov vote "$proposal_id" yes \
      --from "$from" --keyring-backend "$KEYRING" --home "$home" \
      --chain-id "$CHAIN_ID" --node "$NODE_RPC" \
      --fees "$GOV_FEES" --gas auto --gas-adjustment 1.3 -y -o json 2>/dev/null || true)"
    vote_code="$(echo "$vote_out" | jq -r '.code // 0' 2>/dev/null || echo 1)"
    if [[ "$vote_code" == "0" ]]; then
      success_votes=$((success_votes + 1))
    else
      vote_log="$(echo "$vote_out" | jq -r '.raw_log // .log // empty' 2>/dev/null || true)"
      echo "warning: vote from $from failed (code=$vote_code): $vote_log" >&2
    fi
  done

  # Fallback vote path using resolved signer (for local_node-style key layouts).
  if (( success_votes < required_votes )); then
    vote_out="$(evmd tx gov vote "$proposal_id" yes \
      --from "$RESOLVED_GOV_FROM" --keyring-backend "$KEYRING" --home "$RESOLVED_GOV_HOME" \
      --chain-id "$CHAIN_ID" --node "$NODE_RPC" \
      --fees "$GOV_FEES" --gas auto --gas-adjustment 1.3 -y -o json 2>/dev/null || true)"
    vote_code="$(echo "$vote_out" | jq -r '.code // 0' 2>/dev/null || echo 1)"
    if [[ "$vote_code" == "0" ]]; then
      success_votes=$((success_votes + 1))
    fi
  fi

  echo "governance_votes_submitted=$success_votes"
  if (( success_votes < required_votes )); then
    echo "error: insufficient successful governance votes submitted ($success_votes/$required_votes required)" >&2
    return 1
  fi
  return 0
}

# Generate a fresh eth_secp256k1 mnemonic for validator <idx> by adding a
# throwaway key to a temporary test keyring and scraping the mnemonic (the
# last non-empty line of `evmd keys add` output). Echoes the mnemonic;
# returns 1 with a message on stderr when nothing could be scraped.
auto_generate_validator_mnemonic() {
  local idx="$1"
  local scratch_home add_out phrase

  scratch_home="$(mktemp -d "${TMPDIR:-/tmp}/poolrebalancer-mnemonic-${idx}-XXXXXX")"
  add_out="$(evmd keys add "autoval${idx}" --keyring-backend test --algo eth_secp256k1 --home "$scratch_home" 2>&1)"
  phrase="$(echo "$add_out" | awk 'NF{line=$0} END{print line}')"
  rm -rf "$scratch_home"

  if [[ -z "$phrase" ]]; then
    echo "failed to auto-generate mnemonic for validator $idx" >&2
    return 1
  fi
  echo "$phrase"
}

# Ensure VAL0_MNEMONIC..VAL{N-1}_MNEMONIC are all populated, auto-generating
# and exporting any that are missing; exits with a diagnostic otherwise.
resolve_mnemonics() {
  local absent=()
  local total="$VALIDATOR_COUNT"
  local i var_name value

  for i in $(seq 0 $((total - 1))); do
    var_name="VAL${i}_MNEMONIC"
    value="${!var_name:-}"
    if [[ -z "$value" ]]; then
      value="$(auto_generate_validator_mnemonic "$i" || true)"
      if [[ -n "$value" ]]; then
        export "$var_name=$value"
      fi
    fi
    if [[ -z "${!var_name:-}" ]]; then
      absent+=("$var_name")
    fi
  done

  if (( ${#absent[@]} > 0 )); then
    echo "missing required mnemonics: ${absent[*]}" >&2
    echo "set them in env or ensure validator mnemonic generation is available" >&2
    exit 1
  fi
}

# Write the poolrebalancer params into val0's genesis, fan the patched file
# out to every other validator home, then validate the result.
patch_genesis_poolrebalancer_params() {
  local genesis0="$BASEDIR/val0/config/genesis.json"
  local scratch="$BASEDIR/val0/config/genesis.tmp.json"

  jq --argjson maxTargets "$POOLREBALANCER_MAX_TARGET_VALIDATORS" \
    --argjson thr "$POOLREBALANCER_THRESHOLD_BP" \
    --argjson maxOps "$POOLREBALANCER_MAX_OPS_PER_BLOCK" \
    --arg maxMove "$POOLREBALANCER_MAX_MOVE_PER_OP" \
    ' .app_state.poolrebalancer.params.max_target_validators = $maxTargets
    | .app_state.poolrebalancer.params.rebalance_threshold_bp = $thr
    | .app_state.poolrebalancer.params.max_ops_per_block = $maxOps
    | .app_state.poolrebalancer.params.max_move_per_op = $maxMove
    ' "$genesis0" > "$scratch"

  mv "$scratch" "$genesis0"
  for v in $(seq 1 $((VALIDATOR_COUNT - 1))); do
    cp "$genesis0" "$BASEDIR/val${v}/config/genesis.json"
  done

  evmd genesis validate-genesis --home "$BASEDIR/val0" >/dev/null
}

# Patch staking unbonding_time / max_entries into val0's genesis only.
# NOTE(review): unlike patch_genesis_poolrebalancer_params, this does not copy
# to the other validators or validate — presumably it runs before the
# poolrebalancer patch, which does both; confirm the call order.
patch_genesis_staking_params() {
  local genesis0="$BASEDIR/val0/config/genesis.json"
  local scratch="$BASEDIR/val0/config/genesis.tmp.json"

  jq --arg unbond "$STAKING_UNBONDING_TIME" \
    --argjson maxEntries "$STAKING_MAX_ENTRIES" \
    ' .app_state.staking.params.unbonding_time = $unbond
    | .app_state.staking.params.max_entries = $maxEntries
    ' "$genesis0" > "$scratch"

  mv "$scratch" "$genesis0"
}

# Run `evmd debug addr`, preferring CHAIN_HOME's context when it exists.
evmd_debug_addr() {
  local addr="$1"
  if [[ -n "${CHAIN_HOME:-}" && -d "$CHAIN_HOME" ]]; 
then + evmd debug addr "$addr" --home "$CHAIN_HOME" 2>/dev/null || evmd debug addr "$addr" 2>/dev/null || true + else + evmd debug addr "$addr" 2>/dev/null || true + fi +} + +resolve_evm_hex_from_bech32() { + local bech="$1" + local dbg hex + dbg="$(evmd_debug_addr "$bech")" + hex="$(printf '%s\n' "$dbg" | awk '{for(i=1;i<=NF;i++){if($i ~ /^0x[0-9a-fA-F]{40}$/){print $i; exit}}}')" + if [[ -z "$hex" ]]; then + # Fallback: derive from bech32 bytes using evmd debug output fields. + hex="$(printf '%s\n' "$dbg" | awk -F': ' '/Address \(hex\)/{print $2; exit}')" + fi + if [[ "$hex" =~ ^[0-9a-fA-F]{40}$ ]]; then + hex="0x$hex" + fi + printf '%s' "$hex" +} + +pool_addr_from_cast_deploy_output() { + local raw="$1" + local addr="" + if addr="$(printf '%s' "$raw" | jq -r '.contractAddress // .receipt.contractAddress // empty' 2>/dev/null)" && + [[ -n "$addr" && "$addr" != "null" ]]; then + printf '%s' "$addr" + return 0 + fi + if [[ "$raw" =~ \"contractAddress\"[[:space:]]*:[[:space:]]*\"(0x[0-9a-fA-F]{40})\" ]]; then + printf '%s' "${BASH_REMATCH[1]}" + return 0 + fi + return 1 +} + +resolve_pool_bech32() { + if [[ -z "$POOL_EVM_ADDR" ]]; then + echo "error: resolve_pool_bech32 requires POOL_EVM_ADDR" >&2 + exit 1 + fi + POOL_DEL_ADDR="$(evmd_debug_addr "$POOL_EVM_ADDR" | awk '/Bech32 Acc/{print $3; exit}')" + if [[ -z "$POOL_DEL_ADDR" ]]; then + echo "error: could not resolve bech32 address for pool contract: $POOL_EVM_ADDR" >&2 + exit 1 + fi +} + +normalize_cast_uint256_output() { + local s="${1:-}" + s="${s//$'\r'/}" + s="${s%%$'\n'*}" + s="${s%% *}" + s="${s//$'\t'/}" + [[ "$s" =~ ^[0-9]+$ ]] && { printf '%s' "$s"; return 0; } + # cast call usually returns hex (e.g. 0x000...001); decode so minStake / ledger reads match chain. 
+ if [[ "$s" =~ ^0x[0-9a-fA-F]+$ ]] && command -v python3 >/dev/null 2>&1; then + python3 -c "print(int('$s',16))" 2>/dev/null && return 0 + fi + return 1 +} + +pool_call_uint256() { + local sig="$1" + local raw norm + [[ -z "${POOL_EVM_ADDR:-}" ]] && { printf 'n/a'; return 0; } + raw="$(cast call --rpc-url "$EVM_RPC" "$POOL_EVM_ADDR" "$sig" 2>/dev/null || true)" + if norm="$(normalize_cast_uint256_output "$raw")"; then + printf '%s' "$norm" + else + printf 'n/a' + fi +} + +wait_evm_nonce_settled_for_pk() { + local pk="$1" + local deadline_sec="${2:-45}" + local addr pending latest t0 + addr="$(cast wallet address --private-key "$pk")" + t0="$(date +%s)" + while true; do + pending="$(cast nonce --rpc-url "$EVM_RPC" --block pending "$addr" 2>/dev/null || true)" + latest="$(cast nonce --rpc-url "$EVM_RPC" --block latest "$addr" 2>/dev/null || true)" + [[ -z "$pending" || -z "$latest" ]] && return 0 + [[ "$pending" == "$latest" ]] && return 0 + if (( $(date +%s) - t0 > deadline_sec )); then + return 0 + fi + sleep 1 + done +} + +_bumped_gas_price() { + local gp gp2 + gp="$(cast gas-price --rpc-url "$EVM_RPC" 2>/dev/null || echo 1000000)" + gp2="$(awk -v g="$gp" 'BEGIN { print int(g) * 2 }' 2>/dev/null || true)" + [[ -z "$gp2" || "$gp2" == "0" ]] && gp2="$gp" + printf '%s' "$gp2" +} + +deposit_to_pool_once() { + local amount="$1" + local approve_json="${2:-/tmp/pool_seed_approve.json}" + local deposit_json="${3:-/tmp/pool_seed_deposit.json}" + local errf gp2 + wait_evm_nonce_settled_for_pk "$POOL_DEPOSITOR_PK" 45 + errf="$(mktemp -t pool_dep.XXXXXX)" + if cast send --json --rpc-url "$EVM_RPC" --private-key "$POOL_DEPOSITOR_PK" "$BOND_PRECOMPILE" \ + "approve(address,uint256)" "$POOL_EVM_ADDR" "$amount" >"$approve_json" 2>"$errf" \ + && cast send --json --rpc-url "$EVM_RPC" --private-key "$POOL_DEPOSITOR_PK" "$POOL_EVM_ADDR" \ + "deposit(uint256)" "$amount" >"$deposit_json" 2>"$errf"; then + rm -f "$errf" + return 0 + fi + gp2="$(_bumped_gas_price)" + cast send --json 
--rpc-url "$EVM_RPC" --private-key "$POOL_DEPOSITOR_PK" --gas-price "$gp2" "$BOND_PRECOMPILE" \ + "approve(address,uint256)" "$POOL_EVM_ADDR" "$amount" >"$approve_json" 2>"$errf" \ + && cast send --json --rpc-url "$EVM_RPC" --private-key "$POOL_DEPOSITOR_PK" --gas-price "$gp2" "$POOL_EVM_ADDR" \ + "deposit(uint256)" "$amount" >"$deposit_json" 2>"$errf" + local st=$? + rm -f "$errf" + return "$st" +} + +coin_amount_to_uint() { + local coin="$1" + local amount + amount="$(printf '%s' "$coin" | sed -E 's/^([0-9]+).*/\1/')" + if [[ -z "$amount" || ! "$amount" =~ ^[0-9]+$ ]]; then + echo "error: could not parse numeric amount from coin string: $coin" >&2 + return 1 + fi + printf '%s' "$amount" +} + +# Printed before deploy + wiring so every scenario run explains pool setup, timing, and active knobs. +log_pool_contract_setup_banner() { + echo + echo "──────────────────────────────────────────────────────────────────────────────" + echo "CommunityPool setup (scenario=$SCENARIO)" + echo " What happens next: deploy or reuse contract → set automationCaller (EVM tx) → governance" + echo " updates poolrebalancer.pool_delegator_address to this pool’s bech32 account (vote + propagate)." + echo " Deploy timing: fresh deploy is ONE cast create tx. Expect ~15–90s wall-clock on a local devnet:" + echo " blocks must include the tx; nonce/gas contention can add retries. Reusing POOL_CONTRACT_ADDR skips deploy." + echo " Gov wiring timing: proposal + majority votes + param propagation (often ~1–3 min; cap $GOV_STATUS_TIMEOUT s)." + echo " Typical local devnet: pool_delegator_address usually shows up around block heights ~30–32 (varies with block time / votes)." 
  echo " Params already chosen for this run (genesis + scenario):"
  echo " poolrebalancer: max_target_validators=$POOLREBALANCER_MAX_TARGET_VALIDATORS threshold_bp=$POOLREBALANCER_THRESHOLD_BP"
  echo " max_ops_per_block=$POOLREBALANCER_MAX_OPS_PER_BLOCK max_move_per_op=$POOLREBALANCER_MAX_MOVE_PER_OP"
  echo " staking: unbonding_time=$STAKING_UNBONDING_TIME max_entries=$STAKING_MAX_ENTRIES"
  echo " validators: VALIDATOR_COUNT=$VALIDATOR_COUNT EVM_RPC=$EVM_RPC"
  echo "──────────────────────────────────────────────────────────────────────────────"
  echo
}

# Deploy the CommunityPool contract (or reuse POOL_CONTRACT_ADDR when set) and
# resolve its bech32 account into POOL_DEL_ADDR. Only the "contract" delegator
# mode is supported; anything else exits. On fresh deploy: checks RPC
# reachability, warns on a zero-balance owner, abi-encodes the constructor,
# submits one `cast send --create` tx, and retries once with a bumped gas price.
deploy_pool_contract_if_needed() {
  CURRENT_PHASE="deploy_pool_contract"
  if [[ "$POOL_DELEGATOR_MODE" != "contract" ]]; then
    echo "error: unsupported POOL_DELEGATOR_MODE=$POOL_DELEGATOR_MODE (phase1 supports contract only)" >&2
    exit 1
  fi
  if [[ -n "$POOL_CONTRACT_ADDR" ]]; then
    # Reuse path: no tx needed; the address is available immediately.
    POOL_EVM_ADDR="$POOL_CONTRACT_ADDR"
    echo "==> Reusing existing CommunityPool (POOL_CONTRACT_ADDR=$POOL_CONTRACT_ADDR) — no deploy tx; address ready immediately."
  else
    echo "==> Deploying CommunityPool contract (contract creation via cast send --create)"
    echo " Typical wait: one mined block + any gas-price retry; if this hangs, check validator logs and EVM RPC reachability."
    local owner bytecode ctor_args data deploy_raw deploy_err owner_balance gp2
    # Fail fast when the EVM RPC endpoint is down rather than hanging in cast.
    if ! cast chain-id --rpc-url "$EVM_RPC" >/dev/null 2>&1; then
      echo "error: EVM RPC is not reachable at $EVM_RPC (cast chain-id failed)" >&2
      exit 1
    fi
    owner="$(cast wallet address --private-key "$POOL_OWNER_PK")"
    owner_balance="$(cast balance --rpc-url "$EVM_RPC" "$owner" 2>/dev/null || echo "0")"
    # Zero balance is only a warning: the deploy attempt below will surface the
    # authoritative error if funding really is the problem.
    if [[ "$owner_balance" =~ ^[0-9]+$ ]] && [[ "$owner_balance" == "0" ]]; then
      echo "warning: pool owner address $owner has zero EVM balance on $EVM_RPC; deploy may fail" >&2
    fi
    bytecode="$(jq -r '.bytecode // empty' "$ROOT_DIR/contracts/solidity/pool/CommunityPool.json" 2>/dev/null || true)"
    if [[ -z "$bytecode" || "$bytecode" == "null" ]]; then
      echo "error: missing CommunityPool bytecode in contracts/solidity/pool/CommunityPool.json" >&2
      exit 1
    fi
    echo "==> Constructor minStakeAmount=1"
    # Constructor args: (bondPrecompile, 10, 5, minStake=1, owner); appended to
    # the creation bytecode with the 0x prefix stripped.
    ctor_args="$(cast abi-encode "constructor(address,uint32,uint32,uint256,address)" "$BOND_PRECOMPILE" 10 5 1 "$owner")"
    data="${bytecode}${ctor_args#0x}"
    deploy_err="$(mktemp -t pool_deploy_err.XXXXXX)"
    deploy_raw="$(cast send --json --rpc-url "$EVM_RPC" --private-key "$POOL_OWNER_PK" --create "$data" 2>"$deploy_err" || true)"
    if [[ -z "$deploy_raw" ]]; then
      # One retry with ~2x gas price to get past underpriced-tx rejections.
      gp2="$(_bumped_gas_price)"
      deploy_raw="$(cast send --json --rpc-url "$EVM_RPC" --private-key "$POOL_OWNER_PK" --gas-price "$gp2" --create "$data" 2>"$deploy_err" || true)"
    fi
    if [[ -z "$deploy_raw" ]]; then
      echo "error: failed to deploy CommunityPool contract" >&2
      echo "detail: $(tr '\n' ' ' <"$deploy_err" | head -c 500)" >&2
      rm -f "$deploy_err"
      exit 1
    fi
    if ! POOL_EVM_ADDR="$(pool_addr_from_cast_deploy_output "$deploy_raw")" || [[ -z "$POOL_EVM_ADDR" ]]; then
      echo "error: could not parse CommunityPool contract address from deploy output" >&2
      echo "deploy_output: $(printf '%s' "$deploy_raw" | head -c 500)" >&2
      echo "deploy_error: $(tr '\n' ' ' <"$deploy_err" | head -c 500)" >&2
      rm -f "$deploy_err"
      exit 1
    fi
    rm -f "$deploy_err"
  fi
  resolve_pool_bech32
  echo "pool_contract_evm=$POOL_EVM_ADDR pool_delegator_bech32=$POOL_DEL_ADDR"
}

# Point the pool contract's automationCaller at the poolrebalancer module's
# EVM address; the module must be this caller for automated staking to work.
set_pool_automation_caller() {
  CURRENT_PHASE="set_pool_automation_caller"
  echo "==> Setting CommunityPool automationCaller=$MODULE_EVM (single EVM tx; poolrebalancer module must be this caller)"
  cast send --json --rpc-url "$EVM_RPC" --private-key "$POOL_OWNER_PK" \
    "$POOL_EVM_ADDR" "setAutomationCaller(address)" "$MODULE_EVM" >/dev/null
}

# Wire poolrebalancer.params.pool_delegator_address to the pool's bech32
# account via a governance MsgUpdateParams proposal: submit, vote with a
# validator majority, wait for PASSED, then poll until the param propagates.
set_pool_delegator_param_runtime() {
  CURRENT_PHASE="set_pool_delegator_param_runtime"
  echo "==> Governance: set poolrebalancer.params.pool_delegator_address=$POOL_DEL_ADDR"
  echo " This can take up to ~$GOV_STATUS_TIMEOUT s (vote + PROPOSAL_STATUS_PASSED + param poll) — longer than deploy on some machines."
+ local gov_auth current proposal_json status current_addr t0 elapsed submit_out proposal_id before_latest after_latest submit_code submit_log vote_out vote_code vote_log + resolve_governance_signer + gov_auth="$(evmd query auth module-account gov --node "$NODE_RPC" -o json | jq -r '.account.value.address')" + current="$(evmd query poolrebalancer params --node "$NODE_RPC" -o json)" + before_latest="$(evmd query gov proposals --node "$NODE_RPC" -o json 2>/dev/null | jq -r '[.proposals[]?.id | tonumber] | max // 0')" + proposal_json="$(echo "$current" | jq --arg gov "$gov_auth" --arg del "$POOL_DEL_ADDR" --arg dep "$GOV_DEPOSIT" '{ + messages:[{ + "@type":"/cosmos.poolrebalancer.v1.MsgUpdateParams", + authority:$gov, + params:{ + pool_delegator_address:$del, + max_target_validators:.params.max_target_validators, + rebalance_threshold_bp:.params.rebalance_threshold_bp, + max_ops_per_block:.params.max_ops_per_block, + max_move_per_op:.params.max_move_per_op + } + }], + metadata:"", + deposit:$dep, + title:"Set pool delegator for rebalance scenario runner", + summary:"Set CommunityPool account for poolrebalancer runtime scenarios.", + expedited:false + }')" + submit_out="$(evmd tx gov submit-proposal <(echo "$proposal_json") \ + --from "$RESOLVED_GOV_FROM" --keyring-backend "$KEYRING" --home "$RESOLVED_GOV_HOME" \ + --chain-id "$CHAIN_ID" --node "$NODE_RPC" \ + --fees "$GOV_FEES" --gas auto --gas-adjustment 1.5 -y -o json)" + submit_code="$(echo "$submit_out" | jq -r '.code // 0')" + if [[ "$submit_code" != "0" ]]; then + submit_log="$(echo "$submit_out" | jq -r '.raw_log // .log // empty')" + echo "error: governance submit-proposal failed (code=$submit_code)" >&2 + echo "detail: $submit_log" >&2 + exit 1 + fi + proposal_id="$(echo "$submit_out" | jq -r ' + .proposal_id // .tx_response?.proposal_id // .tx_response?.events[]? | select(.type=="submit_proposal") | .attributes[]? 
| select(.key=="proposal_id" or .key=="cHJvcG9zYWxfaWQ=") | .value + ' 2>/dev/null | tail -n 1)" + if [[ -z "$proposal_id" || "$proposal_id" == "null" ]]; then + t0="$(date +%s)" + while true; do + after_latest="$(evmd query gov proposals --node "$NODE_RPC" -o json 2>/dev/null | jq -r '[.proposals[]?.id | tonumber] | max // 0')" + if [[ "$after_latest" =~ ^[0-9]+$ ]] && (( after_latest > before_latest )); then + proposal_id="$after_latest" + break + fi + elapsed="$(($(date +%s) - t0))" + if (( elapsed > 30 )); then + break + fi + sleep 2 + done + fi + if [[ -z "$proposal_id" || "$proposal_id" == "null" ]]; then + echo "error: could not determine governance proposal id for pool delegator update" >&2 + echo "debug: before_latest=$before_latest submit_out_snippet=$(printf '%s' "$submit_out" | tr '\n' ' ' | sed 's/[[:space:]]\+/ /g' | cut -c1-300)" >&2 + exit 1 + fi + echo "proposal_id=$proposal_id" + if ! vote_proposal_with_validator_majority "$proposal_id"; then + exit 1 + fi + sleep "$GOV_WAIT_INITIAL" + t0="$(date +%s)" + while true; do + status="$(evmd query gov proposal "$proposal_id" --node "$NODE_RPC" -o json | jq -r '.proposal.status')" + case "$status" in + PROPOSAL_STATUS_PASSED) + break + ;; + PROPOSAL_STATUS_REJECTED|PROPOSAL_STATUS_FAILED|PROPOSAL_STATUS_ABORTED) + echo "error: governance proposal reached terminal non-passed status=$status" >&2 + exit 1 + ;; + *) + elapsed="$(($(date +%s) - t0))" + if (( elapsed > GOV_STATUS_TIMEOUT )); then + echo "error: governance proposal did not pass before timeout (status=$status timeout=${GOV_STATUS_TIMEOUT}s)" >&2 + exit 1 + fi + sleep 2 + ;; + esac + done + t0="$(date +%s)" + while true; do + current_addr="$(evmd query poolrebalancer params --node "$NODE_RPC" -o json | jq -r '.params.pool_delegator_address // ""')" + [[ "$current_addr" == "$POOL_DEL_ADDR" ]] && break + elapsed="$(($(date +%s) - t0))" + if [[ "$elapsed" -gt "$GOV_POLL_TIMEOUT" ]]; then + echo "error: pool_delegator_address not propagated 
(have=$current_addr want=$POOL_DEL_ADDR)" >&2 + exit 1 + fi + sleep 2 + done +} + +configure_contract_pool_delegator() { + log_pool_contract_setup_banner + deploy_pool_contract_if_needed + set_pool_automation_caller + set_pool_delegator_param_runtime +} + +set_pool_contract_config() { + local max_retrieve="$1" + local max_validators="$2" + local min_stake="$3" + local errf + errf="$(mktemp "${TMPDIR:-/tmp}/poolrebalancer-setconfig.XXXXXX")" + if ! cast send --json --rpc-url "$EVM_RPC" --private-key "$POOL_OWNER_PK" \ + "$POOL_EVM_ADDR" "setConfig(uint32,uint32,uint256)" "$max_retrieve" "$max_validators" "$min_stake" 2>"$errf" >/dev/null; then + echo "error: CommunityPool setConfig failed (max_retrieve=$max_retrieve max_validators=$max_validators min_stake=$min_stake pool=$POOL_EVM_ADDR)" >&2 + cat "$errf" >&2 + rm -f "$errf" + return 1 + fi + rm -f "$errf" +} + +verify_contract_pool_readiness() { + CURRENT_PHASE="verify_contract_pool_readiness" + echo "==> Verifying contract pool readiness" + local onchain_del caller_raw caller_lc module_lc stakeable total_staked principal_assets + onchain_del="$(evmd query poolrebalancer params --node "$NODE_RPC" -o json | jq -r '.params.pool_delegator_address // ""')" + if [[ "$onchain_del" != "$POOL_DEL_ADDR" ]]; then + echo "error: readiness failed; pool_delegator_address mismatch (have=$onchain_del want=$POOL_DEL_ADDR)" >&2 + exit 1 + fi + caller_raw="$(cast call --rpc-url "$EVM_RPC" "$POOL_EVM_ADDR" "automationCaller()(address)" 2>/dev/null || true)" + caller_lc="$(printf '%s' "$caller_raw" | tr '[:upper:]' '[:lower:]')" + module_lc="$(printf '%s' "$MODULE_EVM" | tr '[:upper:]' '[:lower:]')" + if [[ -z "$caller_lc" || "$caller_lc" != "$module_lc" ]]; then + echo "error: readiness failed; automationCaller mismatch (have=$caller_raw want=$MODULE_EVM)" >&2 + exit 1 + fi + + stakeable="$(pool_call_uint256 "stakeablePrincipalLedger()(uint256)")" + total_staked="$(pool_call_uint256 "totalStaked()(uint256)")" + 
principal_assets="$(pool_call_uint256 "principalAssets()(uint256)")" + if [[ "$stakeable" == "n/a" || "$total_staked" == "n/a" || "$principal_assets" == "n/a" ]]; then + echo "error: readiness failed; unable to query CommunityPool accounting views" >&2 + exit 1 + fi + echo "pool_readiness: stakeable=$stakeable total_staked=$total_staked principal_assets=$principal_assets" +} + +wait_for_pool_stake_activation() { + local timeout_secs="${1:-120}" + local start now total_staked del_count + start="$(date +%s)" + while true; do + total_staked="$(pool_call_uint256 "totalStaked()(uint256)")" + del_count="$(evmd query staking delegations "$POOL_DEL_ADDR" --node "$NODE_RPC" -o json 2>/dev/null | jq -r '.delegation_responses | length // 0')" + if [[ "$total_staked" =~ ^[0-9]+$ ]] && (( total_staked > 0 )) && [[ "$del_count" =~ ^[0-9]+$ ]] && (( del_count > 0 )); then + echo "pool_stake_activated: total_staked=$total_staked delegations_count=$del_count" + return 0 + fi + now="$(date +%s)" + if (( now - start > timeout_secs )); then + echo "error: timed out waiting for CommunityPool automated stake activation" >&2 + echo "hint: verify automationCaller, pool_delegator_address, and chain EndBlock logs" >&2 + return 1 + fi + sleep 2 + done +} + +seed_contract_imbalance() { + CURRENT_PHASE="seed_contract_imbalance" + echo "==> Creating contract-driven initial imbalance (scenario=$SCENARIO)" + local seed_max_validators=1 + local seed_amount_main seed_amount_minor + seed_amount_main="$POOL_SEED_DEPOSIT_AMOUNT" + seed_amount_minor="$(coin_amount_to_uint "$IMBALANCE_MINOR_DELEGATION")" + echo "seed_plan: main=$seed_amount_main minor=$seed_amount_minor" + + case "$SCENARIO" in + expansion) + seed_max_validators=3 + ;; + threshold_boundary|happy_path|caps) + seed_max_validators=1 + ;; + *) + echo "error: unsupported SCENARIO in contract seeding: $SCENARIO" >&2 + exit 1 + ;; + esac + + echo "==> Applying CommunityPool setConfig for seeding (max_retrieve=10 
max_validators=$seed_max_validators minStake=1)" + set_pool_contract_config 10 "$seed_max_validators" 1 + + case "$SCENARIO" in + threshold_boundary) + echo "==> Seeding small contract deposit: amount=$seed_amount_minor" + deposit_to_pool_once "$seed_amount_minor" + ;; + expansion) + echo "==> Seeding contract deposits for expansion profile: main=$seed_amount_main minor=$seed_amount_minor" + deposit_to_pool_once "$seed_amount_main" + deposit_to_pool_once "$seed_amount_minor" + deposit_to_pool_once "$seed_amount_minor" + ;; + happy_path|caps) + echo "==> Seeding contract deposits for skew profile: main=$seed_amount_main minor=$seed_amount_minor" + deposit_to_pool_once "$seed_amount_main" + deposit_to_pool_once "$seed_amount_minor" + ;; + esac + + wait_for_pool_stake_activation 150 + + if [[ "$SCENARIO" == "expansion" ]]; then + EXPANSION_INITIAL_DELEGATED=() + while IFS= read -r val; do + [[ -z "$val" ]] && continue + EXPANSION_INITIAL_DELEGATED+=("$val") + done < <(evmd query staking delegations "$POOL_DEL_ADDR" --node "$NODE_RPC" -o json | jq -r '.delegation_responses[]?.delegation.validator_address' | head -n 3) + echo "expansion_seeded_validators=${#EXPANSION_INITIAL_DELEGATED[@]}" + fi + + # Restore broader staking spread for post-seed automation behavior. + local post_seed_min_stake="1" + echo "==> Final CommunityPool setConfig (max_retrieve=10 max_validators=5 minStake=$post_seed_min_stake)" + wait_evm_nonce_settled_for_pk "$POOL_OWNER_PK" 90 + set_pool_contract_config 10 5 "$post_seed_min_stake" || exit 1 +} + +# First-line context for watch when the chain is up but setup has not wired the pool yet. 
+log_watch_pool_delegator_setup_hint() { + local mode_label="${1:-watch}" + local node="${NODE_RPC:-tcp://127.0.0.1:26657}" + local rule="──────────────────────────────────────────────────────────────────────────────" + local params del + params="$(evmd query poolrebalancer params --node "$node" -o json 2>/dev/null || true)" + del="$(printf '%s' "$params" | jq -r '.params.pool_delegator_address // empty' 2>/dev/null || true)" + if [[ -n "$del" ]]; then + return 0 + fi + printf '%s\n' "$rule" + echo "$mode_label: pool_delegator_address is not set on-chain yet." + echo "If another shell is still running this script (default run flow), it may be deploying CommunityPool," + echo "configuring automation, and passing governance to set poolrebalancer.params.pool_delegator_address." + echo "Rough guide: on a typical local devnet, wiring often completes around block heights ~30–32; wall-clock often ~1–3 min." + echo "Caps: proposal status poll up to ${GOV_STATUS_TIMEOUT}s, then param propagation up to ${GOV_POLL_TIMEOUT}s." + echo "This watch keeps polling until reads succeed." + printf '%s\n' "$rule" +} + +check_pending_invariants() { + local json="$1" + local cap="$2" + local max_ops="$3" + + # Important nuance: + # pending-redelegations query returns primary records that can merge multiple ops + # sharing (delegator, denom, dst, completionTime). With max_ops_per_block > 1, + # a merged record amount can exceed max_move_per_op even if each individual op respected the cap. + # So strict cap checking is only sound when max_ops_per_block == 1. 
+ if [[ "$cap" != "0" && "$max_ops" == "1" ]]; then + local badAmt + badAmt="$(echo "$json" | jq -r --argjson cap "$cap" '[.redelegations[] | (.amount.amount|tonumber) > $cap] | any')" + if [[ "$badAmt" != "false" ]]; then + echo "warning: found pending amount > max_move_per_op" >&2 + return 1 + fi + elif [[ "$cap" != "0" && "$max_ops" != "1" ]]; then + echo "note: skipping strict max_move_per_op check on merged primary entries (max_ops_per_block=$max_ops)" + fi + + # Transitive safety: a source validator must not also be an in-flight destination. + local badTrans + badTrans="$(echo "$json" | jq -r '([.redelegations[].src_validator_address] | unique) as $srcs | ([.redelegations[].dst_validator_address] | unique) as $dsts | ([ $srcs[] | . as $s | (($dsts | index($s)) != null) ] | any)')" + if [[ "$badTrans" != "false" ]]; then + echo "warning: transitive safety violated (src appears in dst set)" >&2 + return 1 + fi +} + +watch_rebalance_status() { + # Read-only watch mode for an already running test chain. + # Use this to inspect params/pending queues without re-running setup. 
  CURRENT_PHASE="watch"
  local node="${NODE_RPC:-tcp://127.0.0.1:26657}"
  local status_url last_h=""
  status_url="$(tendermint_status_url)"

  # Poll once per new block: skip iterations where the height is unreadable or
  # unchanged, then report params, pool accounting views, and readiness.
  while true; do
    local h params del pr stakeable total_staked principal_assets reward_reserve caller_raw caller_lc module_lc automation_ready pending_red_json
    h="$(curl -sS --max-time 2 "$status_url" | jq -r '.result.sync_info.latest_block_height // "n/a"')"
    if [[ -z "$h" || "$h" == "n/a" || "$h" == "$last_h" ]]; then
      sleep 1
      continue
    fi
    last_h="$h"
    params="$(evmd query poolrebalancer params --node "$node" -o json 2>/dev/null || echo '{}')"
    del="$(echo "$params" | jq -r '.params.pool_delegator_address // empty')"
    # Lazily resolve the pool's EVM address the first time the delegator param
    # appears on-chain, so watch works without running setup in this shell.
    if [[ -z "${POOL_EVM_ADDR:-}" && -n "$del" ]]; then
      POOL_EVM_ADDR="$(resolve_evm_hex_from_bech32 "$del")"
      if [[ -n "$POOL_EVM_ADDR" ]]; then
        POOL_DEL_ADDR="$del"
      fi
    fi
    pending_red_json="$(evmd query poolrebalancer pending-redelegations --node "$node" -o json 2>/dev/null || echo '{"redelegations":[]}' )"
    pr="$(echo "$pending_red_json" | jq -r '.redelegations | length // 0')"
    # Defaults shown when the pool address is not yet known / calls fail.
    stakeable="n/a"
    total_staked="n/a"
    principal_assets="n/a"
    reward_reserve="n/a"
    caller_raw=""
    automation_ready="no"
    if [[ -n "${POOL_EVM_ADDR:-}" ]]; then
      stakeable="$(pool_call_uint256 "stakeablePrincipalLedger()(uint256)")"
      total_staked="$(pool_call_uint256 "totalStaked()(uint256)")"
      principal_assets="$(pool_call_uint256 "principalAssets()(uint256)")"
      reward_reserve="$(pool_call_uint256 "rewardReserve()(uint256)")"
      caller_raw="$(cast call --rpc-url "$EVM_RPC" "$POOL_EVM_ADDR" "automationCaller()(address)" 2>/dev/null || true)"
      # Compare addresses case-insensitively (EVM checksum casing varies).
      caller_lc="$(printf '%s' "$caller_raw" | tr '[:upper:]' '[:lower:]')"
      module_lc="$(printf '%s' "$MODULE_EVM" | tr '[:upper:]' '[:lower:]')"
      if [[ -n "$caller_lc" && -n "$module_lc" && "$caller_lc" == "$module_lc" && -n "$del" && "$del" == "$POOL_DEL_ADDR" ]]; then
        automation_ready="yes"
      fi
    fi

    if [[ "$WATCH_COMPACT" == "true" ]]; then
      echo "watch phase=$CURRENT_PHASE height=$h pending_red=$pr stakeable=$stakeable total_staked=$total_staked principal_assets=$principal_assets reward_reserve=$reward_reserve automation_ready=$automation_ready scenario=$SCENARIO"
    else
      echo "----- rebalance watch -----"
      echo "phase=$CURRENT_PHASE height=$h pending_red=$pr stakeable=$stakeable total_staked=$total_staked principal_assets=$principal_assets reward_reserve=$reward_reserve automation_ready=$automation_ready"
      echo "$params" | jq -r '.params | {pool_delegator_address,max_target_validators,rebalance_threshold_bp,max_ops_per_block,max_move_per_op}'

      if [[ -n "$del" ]]; then
        local del_json
        del_json="$(evmd query staking delegations "$del" --node "$node" -o json 2>/dev/null || echo '{"delegation_responses":[]}' )"
        echo "$del_json" | jq -r '.delegation_responses[]? | {validator: .delegation.validator_address, amount: .balance.amount, denom: .balance.denom}'
        # Log the pool's initial delegation snapshot exactly once per run,
        # labelled by whether rebalancing had already started when first seen.
        if [[ "$WATCH_INITIAL_DELEGATIONS_LOGGED" != "true" ]]; then
          local del_count
          del_count="$(echo "$del_json" | jq -r '.delegation_responses | length // 0')"
          if [[ "$del_count" =~ ^[0-9]+$ ]] && (( del_count > 0 )); then
            if [[ "$pr" == "0" ]]; then
              echo "pre_rebalance_initial_delegations:"
            else
              echo "initial_delegations_first_observed (pending already started):"
            fi
            echo "$del_json" | jq -r '.delegation_responses[]? | {validator: .delegation.validator_address, amount: .balance.amount, denom: .balance.denom}'
            WATCH_INITIAL_DELEGATIONS_LOGGED="true"
          fi
        fi
      else
        echo "pool delegator not configured"
      fi
      echo
    fi
  done
}

# Tear down any running chain, regenerate a fresh multi-validator genesis, and
# resolve runtime keys for the pool actors.
setup_localnet() {
  CURRENT_PHASE="setup_localnet"
  SETUP_STARTED="true"
  echo "==> Stopping any existing test chain"
  stop_nodes

  echo "==> Generating test genesis ($VALIDATOR_COUNT validators) at $BASEDIR"
  # multi_node_startup.sh is verbose during init; silence setup noise here.
  (cd "$ROOT_DIR" && VALIDATOR_COUNT="$VALIDATOR_COUNT" DEV_ACCOUNT_COUNT="${DEV_ACCOUNT_COUNT:-100}" GENERATE_GENESIS=true ./multi_node_startup.sh -y >/dev/null 2>&1)
  resolve_pool_runtime_keys

}

# Log the effective knob set and patch genesis files. Order matters:
# patch_genesis_poolrebalancer_params runs last because it also propagates the
# (fully patched) genesis to all validators and validates it.
configure_genesis_params() {
  CURRENT_PHASE="configure_genesis"
  echo "==> Pool delegator mode = $POOL_DELEGATOR_MODE"
  echo "==> SCENARIO=$SCENARIO VALIDATOR_COUNT=$VALIDATOR_COUNT DEMO_PROFILE=$DEMO_PROFILE threshold_bp=$POOLREBALANCER_THRESHOLD_BP max_target_validators=$POOLREBALANCER_MAX_TARGET_VALIDATORS max_ops_per_block=$POOLREBALANCER_MAX_OPS_PER_BLOCK max_move_per_op=$POOLREBALANCER_MAX_MOVE_PER_OP"
  echo "==> Patching genesis staking params (unbonding_time + max_entries)"
  patch_genesis_staking_params
  echo "==> Patching genesis poolrebalancer params (pool_delegator_address configured at runtime)"
  patch_genesis_poolrebalancer_params
}

# Launch all validators in background subshells, each logging to its own file;
# optionally stream those logs to this terminal.
start_validators() {
  CURRENT_PHASE="start_validators"
  echo "==> Starting validators"
  mkdir -p "$BASEDIR/logs"
  for v in $(seq 0 $((VALIDATOR_COUNT - 1))); do
    # (... &) detaches the node process from this shell's job control.
    (cd "$ROOT_DIR" && VALIDATOR_COUNT="$VALIDATOR_COUNT" START_VALIDATOR=true NODE_NUMBER="$v" ./multi_node_startup.sh >"$BASEDIR/logs/val${v}.log" 2>&1 &)
  done
  if [[ "$STREAM_VALIDATOR_LOGS" == "true" ]]; then
    echo "==> Streaming validator logs (val0..val$((VALIDATOR_COUNT - 1)))"
    start_validator_log_streams
  fi
}

# Wait for block production, then discover the chain's bond denom and rewrite
# the amount/fee knobs so they use the real denom instead of the default.
wait_chain_ready() {
  CURRENT_PHASE="wait_chain_ready"
  echo "==> Waiting for block production"
  local h
  h="$(wait_for_height 60)"
  echo "height=$h"

  # Resolve chain bond denom and rewrite amount knobs to match this network.
  BOND_DENOM="$(evmd query staking params --node "$NODE_RPC" -o json | jq -r '.params.bond_denom // .bond_denom')"
  if [[ -z "$BOND_DENOM" || "$BOND_DENOM" == "null" ]]; then
    echo "error: could not determine bond_denom from staking params" >&2
    exit 1
  fi
  echo "bond_denom=$BOND_DENOM"
  # NOTE(review): ${var%ogwei} assumes these knobs end in the "ogwei" suffix;
  # values with a different suffix would get the denom appended twice — confirm.
  TX_FEES="${TX_FEES%ogwei}${BOND_DENOM}"
  IMBALANCE_MAIN_DELEGATION="${IMBALANCE_MAIN_DELEGATION%ogwei}${BOND_DENOM}"
  IMBALANCE_MINOR_DELEGATION="${IMBALANCE_MINOR_DELEGATION%ogwei}${BOND_DENOM}"
  ensure_evm_rpc_ready
}

# Dispatch initial imbalance seeding by delegator mode (contract only today).
seed_initial_imbalance() {
  CURRENT_PHASE="seed_initial_imbalance"
  if [[ "$POOL_DELEGATOR_MODE" == "contract" ]]; then
    seed_contract_imbalance
    return 0
  fi
  echo "error: unsupported POOL_DELEGATOR_MODE=$POOL_DELEGATOR_MODE" >&2
  exit 1
}

# Pre-observation sanity checks: delegator param is wired, the pool has
# delegations, bonded validators exist, and (for expansion) the bonded set
# is large enough and contains targets outside the seeded delegation set.
run_sanity_checks() {
  CURRENT_PHASE="run_sanity_checks"
  echo "==> Sanity checks (params + delegations)"
  local onchain_del
  onchain_del="$(evmd query poolrebalancer params --node "$NODE_RPC" -o json | jq -r '.params.pool_delegator_address')"
  if [[ "$onchain_del" != "$POOL_DEL_ADDR" ]]; then
    echo "error: poolrebalancer params.pool_delegator_address mismatch" >&2
    echo " expected: $POOL_DEL_ADDR" >&2
    echo " got: $onchain_del" >&2
    exit 1
  fi

  local del_count
  del_count="$(evmd query staking delegations "$POOL_DEL_ADDR" --node "$NODE_RPC" -o json | jq -r '.delegation_responses | length')"
  echo "delegations_count=$del_count"

  local bonded_count
  bonded_count="$(evmd query staking validators --node "$NODE_RPC" -o json | jq -r '[.validators[] | select(.status=="BOND_STATUS_BONDED")] | length')"
  echo "bonded_validators=$bonded_count"
  if (( bonded_count == 0 )); then
    echo "error: no bonded validators found; cannot rebalance" >&2
    exit 1
  fi
  if [[ "$POOL_DELEGATOR_MODE" == "contract" && "$del_count" == "0" ]]; then
    echo "error: contract delegator has zero delegations after seeding; rebalance cannot run" >&2
    exit 1
  fi

  if [[ "$SCENARIO" == "expansion" ]]; then
    if (( bonded_count < 5 )); then
      echo "error: expansion expects at least 5 bonded validators, got $bonded_count (use --nodes 5)" >&2
      exit 1
    fi

    if (( ${#EXPANSION_INITIAL_DELEGATED[@]} != 3 )); then
      echo "error: expansion seed did not produce 3 initial delegations (got ${#EXPANSION_INITIAL_DELEGATED[@]})" >&2
      exit 1
    fi

    # Compute the expansion target set: bonded validators NOT in the seeded
    # delegation set (jq array subtraction over the two unique lists).
    local bonded_json
    bonded_json="$(evmd query staking validators --node "$NODE_RPC" -o json | jq -c '[.validators[] | select(.status=="BOND_STATUS_BONDED") | .operator_address] | unique')"
    local seeded_json
    seeded_json="$(printf '%s\n' "${EXPANSION_INITIAL_DELEGATED[@]}" | jq -R . | jq -s -c 'unique')"
    EXPANSION_MISSING_DSTS=()
    while IFS= read -r val; do
      [[ -z "$val" ]] && continue
      EXPANSION_MISSING_DSTS+=("$val")
    done < <(jq -n -r --argjson bonded "$bonded_json" --argjson delegated "$seeded_json" '($bonded - $delegated)[]')

    if (( ${#EXPANSION_MISSING_DSTS[@]} < 2 )); then
      echo "error: expansion expects at least 2 bonded validators outside the initial pool delegation set (got ${#EXPANSION_MISSING_DSTS[@]})" >&2
      exit 1
    fi

    # Reset the observation accumulator used by update_expansion_observed_dsts.
    EXPANSION_OBSERVED_DSTS_TEXT=""
    echo "scenario_check expansion: bonded=$bonded_count initial_seeded=${#EXPANSION_INITIAL_DELEGATED[@]} extra_targets=${#EXPANSION_MISSING_DSTS[@]}"
  fi

}

# Fold newly observed redelegation destinations (from a pending-redelegations
# payload) into the newline-separated EXPANSION_OBSERVED_DSTS_TEXT accumulator,
# counting only destinations that are expansion targets. No-op outside the
# expansion scenario.
update_expansion_observed_dsts() {
  local pending_json="$1"
  if [[ "$SCENARIO" != "expansion" ]]; then
    return 0
  fi

  local dst target
  while IFS= read -r dst; do
    [[ -z "$dst" ]] && continue
    for target in "${EXPANSION_MISSING_DSTS[@]}"; do
      if [[ "$dst" == "$target" ]]; then
        # Deduplicate before appending to the accumulator.
        if !
printf '%s\n' "$EXPANSION_OBSERVED_DSTS_TEXT" | grep -Fxq "$dst" 2>/dev/null; then
          if [[ -n "$EXPANSION_OBSERVED_DSTS_TEXT" ]]; then
            EXPANSION_OBSERVED_DSTS_TEXT="${EXPANSION_OBSERVED_DSTS_TEXT}"$'\n'"$dst"
          else
            EXPANSION_OBSERVED_DSTS_TEXT="$dst"
          fi
        fi
        break
      fi
    done
  done < <(echo "$pending_json" | jq -r '.redelegations[]?.dst_validator_address')
}

# Count how many expansion target validators have appeared so far as
# redelegation destinations (exact-line match against the accumulator).
expansion_observed_count() {
  local seen=0 candidate
  for candidate in "${EXPANSION_MISSING_DSTS[@]}"; do
    if printf '%s\n' "$EXPANSION_OBSERVED_DSTS_TEXT" | grep -Fxq "$candidate" 2>/dev/null; then
      seen=$((seen + 1))
    fi
  done
  echo "$seen"
}

# Observe the pending-redelegations queue for up to POLL_SAMPLES rounds.
# Once pending ops appear: validate invariants, then either exit 0 or (with
# KEEP_RUNNING=true) switch into an endless monitor loop.
observe_and_monitor() {
  CURRENT_PHASE="observe_and_monitor"
  echo "==> Observing pending operations (scenario=$SCENARIO)"
  # Poll loop used for observation:
  # - collect pending queue state
  # - wait until any pending operations appear
  # - validate generic invariants
  local sample
  for sample in $(seq 1 "$POLL_SAMPLES"); do
    local height pending queue_json
    height="$(curl -sS --max-time 2 "$(tendermint_status_url)" | jq -r '.result.sync_info.latest_block_height')"
    queue_json="$(evmd query poolrebalancer pending-redelegations --node "$NODE_RPC" -o json)"
    update_expansion_observed_dsts "$queue_json"
    pending="$(echo "$queue_json" | jq -r '.redelegations | length')"
    if [[ "$WATCH_COMPACT" == "true" ]]; then
      echo "sample=$sample phase=$CURRENT_PHASE height=$height pending_red=$pending scenario=$SCENARIO"
    else
      echo "sample=$sample phase=$CURRENT_PHASE height=$height pending_red=$pending"
    fi
    if [[ "$SCENARIO" == "expansion" ]]; then
      local seen expected
      seen="$(expansion_observed_count)"
      expected="${#EXPANSION_MISSING_DSTS[@]}"
      echo "expansion_progress: observed_new_destinations=$seen/$expected"
    fi

    if (( pending > 0 )); then
      check_pending_invariants "$queue_json" "$POOLREBALANCER_MAX_MOVE_PER_OP" "$POOLREBALANCER_MAX_OPS_PER_BLOCK"
      echo "info: pending operations observed; continuing monitor"
      if [[ "$KEEP_RUNNING" != "true" ]]; then
        exit 0
      fi
      CURRENT_PHASE="steady_monitor"
      echo "==> KEEP_RUNNING=true, continuing in monitor mode (Ctrl+C to stop)"
      while true; do
        local monitorHeight monitorRed
        monitorHeight="$(curl -sS --max-time 2 "$(tendermint_status_url)" | jq -r '.result.sync_info.latest_block_height')"
        monitorRed="$(evmd query poolrebalancer pending-redelegations --node "$NODE_RPC" -o json | jq -r '.redelegations | length')"
        if [[ "$WATCH_COMPACT" == "true" ]]; then
          echo "monitor phase=$CURRENT_PHASE height=$monitorHeight pending_red=$monitorRed scenario=$SCENARIO"
        else
          echo "monitor phase=$CURRENT_PHASE height=$monitorHeight pending_red=$monitorRed"
        fi
        sleep "$POLL_SLEEP_SECS"
      done
    fi
    sleep "$POLL_SLEEP_SECS"
  done

  echo "info: no pending operations observed within polling window" >&2
  echo "note: this can be expected when drift is below threshold or the system is already balanced" >&2
  exit 0
}

apply_scenario_defaults() {
  # Scenario defaults encode engineer-friendly test behavior.
  # They are applied only when the corresponding env var was not explicitly set.
+ case "$SCENARIO" in + # Canonical scenarios + happy_path) + if [[ -z "$VALIDATOR_COUNT" ]]; then VALIDATOR_COUNT=3; fi + ;; + caps) + if [[ -z "$VALIDATOR_COUNT" ]]; then VALIDATOR_COUNT=3; fi + if [[ "$USER_SET_MAX_OPS_PER_BLOCK" != "true" ]]; then POOLREBALANCER_MAX_OPS_PER_BLOCK=1; fi + if [[ "$USER_SET_MAX_MOVE_PER_OP" != "true" ]]; then POOLREBALANCER_MAX_MOVE_PER_OP=1000000000000000000; fi + if [[ "$USER_SET_POOL_SEED_DEPOSIT_AMOUNT" != "true" ]]; then POOL_SEED_DEPOSIT_AMOUNT=500000000000000000000; fi + ;; + threshold_boundary) + if [[ -z "$VALIDATOR_COUNT" ]]; then VALIDATOR_COUNT=3; fi + if [[ "$USER_SET_THRESHOLD_BP" != "true" ]]; then POOLREBALANCER_THRESHOLD_BP=5000; fi + if [[ "$USER_SET_MAX_OPS_PER_BLOCK" != "true" ]]; then POOLREBALANCER_MAX_OPS_PER_BLOCK=2; fi + if [[ "$USER_SET_MAX_MOVE_PER_OP" != "true" ]]; then POOLREBALANCER_MAX_MOVE_PER_OP=100000000000000000000; fi + ;; + expansion) + if [[ -z "$VALIDATOR_COUNT" ]]; then VALIDATOR_COUNT=5; fi + if [[ "$USER_SET_MAX_TARGET_VALIDATORS" != "true" ]]; then POOLREBALANCER_MAX_TARGET_VALIDATORS=5; fi + if [[ "$USER_SET_MAX_OPS_PER_BLOCK" != "true" ]]; then POOLREBALANCER_MAX_OPS_PER_BLOCK=1; fi + if [[ "$USER_SET_MAX_MOVE_PER_OP" != "true" ]]; then POOLREBALANCER_MAX_MOVE_PER_OP=10000000000000000000; fi + if [[ "$USER_SET_IMBALANCE_MINOR_DELEGATION" != "true" ]]; then IMBALANCE_MINOR_DELEGATION=1000000000000000000000ogwei; fi + ;; + # Backward-compatible aliases + baseline_3val) + SCENARIO="happy_path" + if [[ -z "$VALIDATOR_COUNT" ]]; then VALIDATOR_COUNT=3; fi + ;; + max_target_gt_bonded_3val) + SCENARIO="happy_path" + if [[ -z "$VALIDATOR_COUNT" ]]; then VALIDATOR_COUNT=3; fi + if [[ "$USER_SET_MAX_TARGET_VALIDATORS" != "true" ]]; then POOLREBALANCER_MAX_TARGET_VALIDATORS=5; fi + ;; + target_set_expansion_5val) + SCENARIO="expansion" + if [[ -z "$VALIDATOR_COUNT" ]]; then VALIDATOR_COUNT=5; fi + if [[ "$USER_SET_MAX_TARGET_VALIDATORS" != "true" ]]; then POOLREBALANCER_MAX_TARGET_VALIDATORS=5; fi 
+ ;; + *) + echo "invalid SCENARIO: $SCENARIO" >&2 + echo "expected: happy_path|caps|threshold_boundary|expansion" >&2 + exit 1 + ;; + esac +} + +main() { + trap on_interrupt INT TERM + trap cleanup_on_exit EXIT + PARSED_SUBCOMMAND="" + if ! parse_cli_args "$@"; then + usage + exit 1 + fi + + if [[ "$PARSED_SUBCOMMAND" == "watch" ]]; then + require_bin jq + require_bin curl + require_bin evmd + # Match main() tuning so watch output aligns with seeded chains. + apply_scenario_defaults + case "${DEMO_PROFILE:-medium}" in + slow) + POOLREBALANCER_MAX_OPS_PER_BLOCK="${POOLREBALANCER_MAX_OPS_PER_BLOCK:-1}" + POOLREBALANCER_MAX_MOVE_PER_OP="${POOLREBALANCER_MAX_MOVE_PER_OP:-10000000000000000000}" + ;; + medium) ;; + fast) + POOLREBALANCER_MAX_OPS_PER_BLOCK="${POOLREBALANCER_MAX_OPS_PER_BLOCK:-10}" + POOLREBALANCER_MAX_MOVE_PER_OP="${POOLREBALANCER_MAX_MOVE_PER_OP:-0}" + ;; + *) + echo "invalid DEMO_PROFILE: $DEMO_PROFILE (expected: slow|medium|fast)" >&2 + exit 1 + ;; + esac + log_watch_pool_delegator_setup_hint "watch" + watch_rebalance_status + exit 0 + fi + if [[ "$PARSED_SUBCOMMAND" == "help" ]]; then + usage + exit 0 + fi + # Lightweight entry: no genesis/validators — assumes devnet already running. + if [[ "$PARSED_SUBCOMMAND" == "user_flow_multikey" ]]; then + require_bin jq + require_bin curl + require_bin evmd + require_bin cast + apply_scenario_defaults + if [[ ! "$VALIDATOR_COUNT" =~ ^[0-9]+$ ]] || (( VALIDATOR_COUNT < 1 )); then + echo "invalid --nodes/VALIDATOR_COUNT: $VALIDATOR_COUNT (expected positive integer)" >&2 + exit 1 + fi + run_user_flow_multikey_subcommand + exit 0 + fi + if [[ "$PARSED_SUBCOMMAND" == "community_pool_edge_cases" ]]; then + require_bin jq + require_bin curl + require_bin evmd + require_bin cast + apply_scenario_defaults + if [[ ! 
"$VALIDATOR_COUNT" =~ ^[0-9]+$ ]] || (( VALIDATOR_COUNT < 1 )); then + echo "invalid --nodes/VALIDATOR_COUNT: $VALIDATOR_COUNT (expected positive integer)" >&2 + exit 1 + fi + run_community_pool_edge_cases_subcommand + exit 0 + fi + + require_bin jq + require_bin curl + require_bin evmd + require_bin cast + + apply_scenario_defaults + if [[ ! "$VALIDATOR_COUNT" =~ ^[0-9]+$ ]] || (( VALIDATOR_COUNT < 1 )); then + echo "invalid --nodes/VALIDATOR_COUNT: $VALIDATOR_COUNT (expected positive integer)" >&2 + exit 1 + fi + + case "$DEMO_PROFILE" in + slow) + POOLREBALANCER_MAX_OPS_PER_BLOCK="${POOLREBALANCER_MAX_OPS_PER_BLOCK:-1}" + POOLREBALANCER_MAX_MOVE_PER_OP="${POOLREBALANCER_MAX_MOVE_PER_OP:-10000000000000000000}" # 1e19 + ;; + medium) + # Defaults already set above. + ;; + fast) + POOLREBALANCER_MAX_OPS_PER_BLOCK="${POOLREBALANCER_MAX_OPS_PER_BLOCK:-10}" + POOLREBALANCER_MAX_MOVE_PER_OP="${POOLREBALANCER_MAX_MOVE_PER_OP:-0}" # no cap + ;; + *) + echo "invalid DEMO_PROFILE: $DEMO_PROFILE (expected: slow|medium|fast)" >&2 + exit 1 + ;; + esac + + # Execution flow: + # 1) test chain setup and genesis patching + # 2) contract deployment + runtime param wiring + # 3) readiness and scenario seeding + # 4) sanity checks and scenario-specific observers + resolve_mnemonics + setup_localnet + configure_genesis_params + start_validators + wait_chain_ready + configure_contract_pool_delegator + verify_contract_pool_readiness + seed_initial_imbalance + run_sanity_checks + observe_and_monitor +} + +main "$@" + diff --git a/tests/e2e/poolrebalancer/user_flow_multikey.sh b/tests/e2e/poolrebalancer/user_flow_multikey.sh new file mode 100755 index 00000000..81a2cfa8 --- /dev/null +++ b/tests/e2e/poolrebalancer/user_flow_multikey.sh @@ -0,0 +1,1262 @@ +#!/usr/bin/env bash +# Multi-account CommunityPool E2E: deposit → (optional) withdraw / claimWithdraw → (optional) claimRewards. +# Needs: dev_accounts.txt, pool_delegator_address on chain or POOL_CONTRACT_ADDR. 
See tests/e2e/poolrebalancer/README.md. +# Shared helpers: lib/pool_e2e_common.sh (RPC, cast, approve+deposit). +# +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +# shellcheck source=/dev/null +source "$SCRIPT_DIR/lib/pool_e2e_common.sh" + +ROOT_DIR="$(cd "$SCRIPT_DIR/../../.." && pwd)" + +# --- Paths & chain endpoints --- +BASEDIR="${BASEDIR:-"$HOME/.og-evm-devnet"}" +NODE_RPC="${NODE_RPC:-tcp://127.0.0.1:26657}" +CHAIN_ID="${CHAIN_ID:-10740}" +EVM_RPC="${EVM_RPC:-http://127.0.0.1:8545}" +BOND_PRECOMPILE="${BOND_PRECOMPILE:-0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE}" +CHAIN_HOME="${CHAIN_HOME:-$BASEDIR/val0}" + +# --- Pool & accounts --- +POOL_CONTRACT_ADDR="${POOL_CONTRACT_ADDR:-}" +# Optional opt-in stress profile. Empty preserves existing behavior. +USER_FLOW_STRESS_PROFILE="${USER_FLOW_STRESS_PROFILE:-}" +USER_COUNT_SET_BY_ENV=0 +DEPOSIT_INTERVAL_SECS_SET_BY_ENV=0 +FAIL_FAST_SET_BY_ENV=0 +WITHDRAW_USERS_SET_BY_ENV=0 +WITHDRAW_SUBMIT_RETRIES_SET_BY_ENV=0 +CLAIM_POLL_MAX_ATTEMPTS_SET_BY_ENV=0 +USER_FLOW_MODE_SET_BY_ENV=0 +DEPOSIT_CONCURRENCY_SET_BY_ENV=0 +WITHDRAW_CONCURRENCY_SET_BY_ENV=0 +CLAIM_CONCURRENCY_SET_BY_ENV=0 +CLAIM_REWARDS_CONCURRENCY_SET_BY_ENV=0 +BATCH_DELAY_MS_SET_BY_ENV=0 +if [[ -n "${USER_COUNT+x}" ]]; then USER_COUNT_SET_BY_ENV=1; fi +if [[ -n "${DEPOSIT_INTERVAL_SECS+x}" ]]; then DEPOSIT_INTERVAL_SECS_SET_BY_ENV=1; fi +if [[ -n "${FAIL_FAST+x}" ]]; then FAIL_FAST_SET_BY_ENV=1; fi +if [[ -n "${WITHDRAW_USERS+x}" ]]; then WITHDRAW_USERS_SET_BY_ENV=1; fi +if [[ -n "${WITHDRAW_SUBMIT_RETRIES+x}" ]]; then WITHDRAW_SUBMIT_RETRIES_SET_BY_ENV=1; fi +if [[ -n "${CLAIM_POLL_MAX_ATTEMPTS+x}" ]]; then CLAIM_POLL_MAX_ATTEMPTS_SET_BY_ENV=1; fi +if [[ -n "${USER_FLOW_MODE+x}" ]]; then USER_FLOW_MODE_SET_BY_ENV=1; fi +if [[ -n "${DEPOSIT_CONCURRENCY+x}" ]]; then DEPOSIT_CONCURRENCY_SET_BY_ENV=1; fi +if [[ -n "${WITHDRAW_CONCURRENCY+x}" ]]; then WITHDRAW_CONCURRENCY_SET_BY_ENV=1; fi +if [[ -n "${CLAIM_CONCURRENCY+x}" ]]; then 
CLAIM_CONCURRENCY_SET_BY_ENV=1; fi +if [[ -n "${CLAIM_REWARDS_CONCURRENCY+x}" ]]; then CLAIM_REWARDS_CONCURRENCY_SET_BY_ENV=1; fi +if [[ -n "${BATCH_DELAY_MS+x}" ]]; then BATCH_DELAY_MS_SET_BY_ENV=1; fi +USER_COUNT="${USER_COUNT:-5}" +DEPOSIT_AMOUNT_WEI="${DEPOSIT_AMOUNT_WEI:-100000000000000000000}" +DEV_ACCOUNTS_FILE="${DEV_ACCOUNTS_FILE:-$BASEDIR/dev_accounts.txt}" +AUTO_PROVISION_DEV_ACCOUNTS="${AUTO_PROVISION_DEV_ACCOUNTS:-1}" +AUTO_PROVISION_FUND_WEI="${AUTO_PROVISION_FUND_WEI:-1000000000000000000000}" +SKIP_DEPOSITS="${SKIP_DEPOSITS:-0}" +FAIL_FAST="${FAIL_FAST:-1}" +DEPOSIT_INTERVAL_SECS="${DEPOSIT_INTERVAL_SECS:-2}" +USER_FLOW_MODE="${USER_FLOW_MODE:-serial}" +DEPOSIT_CONCURRENCY="${DEPOSIT_CONCURRENCY:-1}" +WITHDRAW_CONCURRENCY="${WITHDRAW_CONCURRENCY:-1}" +CLAIM_CONCURRENCY="${CLAIM_CONCURRENCY:-1}" +CLAIM_REWARDS_CONCURRENCY="${CLAIM_REWARDS_CONCURRENCY:-1}" +BATCH_DELAY_MS="${BATCH_DELAY_MS:-0}" + +# --- Withdraw / claim --- +WITHDRAW_USERS="${WITHDRAW_USERS:-3}" +WITHDRAW_FRACTION_BP="${WITHDRAW_FRACTION_BP:-1000}" +UNBONDING_WAIT_BUFFER_SECS="${UNBONDING_WAIT_BUFFER_SECS:-10}" +CLAIM_POLL_INTERVAL_SECS="${CLAIM_POLL_INTERVAL_SECS:-2}" +CLAIM_POLL_MAX_ATTEMPTS="${CLAIM_POLL_MAX_ATTEMPTS:-100}" +WITHDRAW_SUBMIT_RETRIES="${WITHDRAW_SUBMIT_RETRIES:-25}" +WITHDRAW_RETRY_SLEEP_SECS="${WITHDRAW_RETRY_SLEEP_SECS:-2}" +WITHDRAW_CLAIM_GAS_LIMIT="${WITHDRAW_CLAIM_GAS_LIMIT:-9500000}" + +# --- Optional reward paths (defaults favor withdraw-internal reward handling) --- +POOL_OWNER_PK="${POOL_OWNER_PK:-}" +PRE_WITHDRAW_HARVEST="${PRE_WITHDRAW_HARVEST:-0}" +PRE_WITHDRAW_CLAIM_REWARDS="${PRE_WITHDRAW_CLAIM_REWARDS:-0}" +REWARD_SYNC_WAIT_SECS="${REWARD_SYNC_WAIT_SECS:-0}" +POST_CLAIMWITHDRAW_CLAIM_REWARDS="${POST_CLAIMWITHDRAW_CLAIM_REWARDS:-1}" +POST_CLAIMWITHDRAW_WAIT_SECS="${POST_CLAIMWITHDRAW_WAIT_SECS:-20}" +POST_CLAIMWITHDRAW_USERS="${POST_CLAIMWITHDRAW_USERS:-}" + +POOL_EVM_ADDR="" +DEPOSIT_OK=0 +DEPOSIT_FAIL=0 +RUN_START_TS=0 +RUN_END_TS=0 
+WITHDRAW_SUBMIT_ATTEMPTED=0 +WITHDRAW_SUBMIT_SUCCESS=0 +WITHDRAW_SUBMIT_FAILED=0 +WITHDRAW_SUBMIT_RETRIES_TOTAL=0 +CLAIM_ATTEMPTED=0 +CLAIM_SUCCESS=0 +CLAIM_FAILED=0 +CLAIM_RETRIES_TOTAL=0 +POST_CLAIM_REWARDS_ATTEMPTED=0 +POST_CLAIM_REWARDS_SUCCESS=0 +POST_CLAIM_REWARDS_FAILED=0 +WITHDRAW_REQUEST_WINDOW_START="" +WITHDRAW_REQUEST_WINDOW_END="" +WITHDRAW_REQUESTS_MAPPED=0 +WITHDRAW_REQUESTS_CLAIMED_VERIFIED=0 + +usage() { + cat <<'USAGE' +usage: user_flow_multikey.sh — configuration is via environment variables; see tests/e2e/poolrebalancer/README.md +USAGE +} + +# NOTE(review): reconstructed from a garbled span ("cat </dev/null 2>&1 || {"); +# the usage heredoc and the require_bin() header were lost — confirm against the original file. +require_bin() { + command -v "$1" >/dev/null 2>&1 || { + echo "missing dependency: $1" >&2 + exit 1 + } +} + +# Visual phase break + title; extra args are bullet hints. +log_flow_section() { + echo "" + echo "--------------------------------------------------------------------" + printf "==> %s\n" "$1" + shift || true + while [[ $# -gt 0 ]]; do + printf " * %s\n" "$1" + shift + done + echo "--------------------------------------------------------------------" +} + +is_stress_mode_active() { + [[ -n "${USER_FLOW_STRESS_PROFILE:-}" ]] +} + +safe_ratio_percent_2dp() { + local num="${1:-0}" den="${2:-0}" + python3 - "$num" "$den" <<'PY' +import sys +n = int(sys.argv[1]) +d = int(sys.argv[2]) +if d <= 0: + print("0.00") +else: + print(f"{(n * 100.0) / d:.2f}") +PY +} + +safe_avg_2dp() { + local num="${1:-0}" den="${2:-0}" + python3 - "$num" "$den" <<'PY' +import sys +n = int(sys.argv[1]) +d = int(sys.argv[2]) +if d <= 0: + print("0.00") +else: + print(f"{n / d:.2f}") +PY +} + +is_parallel_mode_active() { + [[ "${USER_FLOW_MODE:-serial}" == "parallel" ]] +} + +sleep_ms() { + local ms="${1:-0}" + [[ "$ms" =~ ^[0-9]+$ ]] || ms=0 + (( ms <= 0 )) && return 0 + python3 - "$ms" <<'PY' +import sys +import time +time.sleep(int(sys.argv[1]) / 1000.0) +PY +} + +count_dev_accounts_in_file() { + local f="$1" + awk ' + /^dev[0-9]+:/ { c++ } + END { print c + 0 } + ' "$f" 2>/dev/null || echo 0 +} + +append_dev_account_to_file() { + local f="$1" name="$2" bech32="$3" priv="$4" mnemonic="$5" + { + echo "" + echo "${name}:" + echo " address: ${bech32}" + echo " private_key: ${priv}" + echo " 
mnemonic: ${mnemonic}" + } >>"$f" +} + +create_dev_account_record() { + local idx="$1" + local name="dev${idx}" + local dev_home="$BASEDIR/.dev_keys_tmp" + local full_output mnemonic bech32 priv + + rm -rf "$dev_home" + mkdir -p "$dev_home" + full_output="$(evmd keys add "$name" --keyring-backend test --algo eth_secp256k1 --home "$dev_home" 2>&1)" + mnemonic="$(printf '%s\n' "$full_output" | sed -n '$p')" + bech32="$(evmd keys show "$name" -a --keyring-backend test --home "$dev_home" 2>/dev/null || true)" + priv="$(evmd keys unsafe-export-eth-key "$name" --keyring-backend test --home "$dev_home" 2>/dev/null || true)" + rm -rf "$dev_home" + + if [[ -z "$bech32" || -z "$priv" ]]; then + echo "error: failed to create account metadata for $name" >&2 + return 1 + fi + if [[ "$priv" != 0x* ]]; then + priv="0x$priv" + fi + printf '%s\n%s\n%s\n%s\n' "$name" "$bech32" "$priv" "$mnemonic" +} + +fund_dev_account_from_dev0() { + local to_addr_hex="$1" + local funder_pk + local errf + funder_pk="$(dev_account_private_key_from_file "dev0" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$funder_pk" ]]; then + echo "error: cannot auto-provision accounts: missing dev0 private key in $DEV_ACCOUNTS_FILE" >&2 + return 1 + fi + wait_evm_nonce_settled_for_pk "$funder_pk" "$EVM_RPC" 45 + errf="$(mktemp -t cast_fund_dev.XXXXXX)" + cast send --json --rpc-url "$EVM_RPC" --private-key "$funder_pk" --value "$AUTO_PROVISION_FUND_WEI" "$to_addr_hex" >/dev/null 2>"$errf" || { + cat "$errf" >&2 + rm -f "$errf" + return 1 + } + rm -f "$errf" +} + +ensure_dev_accounts_available() { + local target_count="$1" + local available idx record name bech32 priv mnemonic evm_addr + + available="$(count_dev_accounts_in_file "$DEV_ACCOUNTS_FILE")" + [[ "$available" =~ ^[0-9]+$ ]] || available=0 + (( available >= target_count )) && return 0 + + if [[ "$AUTO_PROVISION_DEV_ACCOUNTS" != "1" ]]; then + return 1 + fi + + log_flow_section "Auto-provision dev accounts" \ + "Requested USER_COUNT=$target_count but only 
$available account(s) in $DEV_ACCOUNTS_FILE." \ + "Creating and funding dev${available}..dev$((target_count - 1)) with AUTO_PROVISION_FUND_WEI=$AUTO_PROVISION_FUND_WEI." + + for idx in $(seq "$available" $((target_count - 1))); do + echo " -- provisioning dev${idx} ($((idx - available + 1))/$((target_count - available)))" + record="$(create_dev_account_record "$idx")" || return 1 + name="$(printf '%s\n' "$record" | sed -n '1p')" + bech32="$(printf '%s\n' "$record" | sed -n '2p')" + priv="$(printf '%s\n' "$record" | sed -n '3p')" + mnemonic="$(printf '%s\n' "$record" | sed -n '4p')" + evm_addr="$(resolve_evm_hex_from_bech32 "$bech32")" + if [[ -z "$evm_addr" || "$evm_addr" == "0x" ]]; then + echo "error: could not derive EVM address for auto-provisioned $name ($bech32)" >&2 + return 1 + fi + fund_dev_account_from_dev0 "$evm_addr" || return 1 + append_dev_account_to_file "$DEV_ACCOUNTS_FILE" "$name" "$bech32" "$priv" "$mnemonic" + echo " -- provisioned $name: $bech32 ($evm_addr)" + done +} + +normalize_user_counts_for_available_accounts() { + local available + local explicit_user_count_requested=0 + (( USER_COUNT_SET_BY_ENV == 1 )) && explicit_user_count_requested=1 + available="$(count_dev_accounts_in_file "$DEV_ACCOUNTS_FILE")" + [[ "$available" =~ ^[0-9]+$ ]] || available=0 + if (( available < 1 )); then + echo "error: no dev accounts found in $DEV_ACCOUNTS_FILE" >&2 + exit 1 + fi + + if (( USER_COUNT > available )); then + if is_stress_mode_active || (( explicit_user_count_requested == 1 )); then + if ensure_dev_accounts_available "$USER_COUNT"; then + available="$(count_dev_accounts_in_file "$DEV_ACCOUNTS_FILE")" + else + echo "error: requested USER_COUNT=$USER_COUNT but only $available dev accounts are available and auto-provisioning failed" >&2 + echo "hint: ensure EVM RPC is reachable and dev0 has funds; or lower --user-count / USER_COUNT" >&2 + exit 1 + fi + else + echo "error: USER_COUNT=$USER_COUNT exceeds available dev accounts=$available in 
$DEV_ACCOUNTS_FILE" >&2 + exit 1 + fi + fi + + if (( WITHDRAW_USERS > USER_COUNT )); then + echo "warning: WITHDRAW_USERS=$WITHDRAW_USERS exceeds USER_COUNT=$USER_COUNT; capping WITHDRAW_USERS to $USER_COUNT" >&2 + WITHDRAW_USERS="$USER_COUNT" + fi +} + +# POOL_CONTRACT_ADDR wins; else query poolrebalancer params and map bech32 → 0x. +resolve_pool_evm_addr() { + if [[ -n "$POOL_CONTRACT_ADDR" ]]; then + POOL_EVM_ADDR="$POOL_CONTRACT_ADDR" + log_flow_section "Pool contract (from env)" \ + "Using POOL_CONTRACT_ADDR from environment (skipping chain query for pool_delegator_address)." + echo " POOL_CONTRACT_ADDR=$POOL_EVM_ADDR" + return 0 + fi + local del params + log_flow_section "Resolve CommunityPool from chain" \ + "Reading x/poolrebalancer params for pool_delegator_address, then mapping bech32 to EVM 0x for cast calls." + params="$(evmd query poolrebalancer params --node "$NODE_RPC" -o json 2>/dev/null || true)" + del="$(echo "$params" | jq -r '.params.pool_delegator_address // empty')" + if [[ -z "$del" ]]; then + echo "error: set POOL_CONTRACT_ADDR or configure poolrebalancer.params.pool_delegator_address" >&2 + exit 1 + fi + POOL_EVM_ADDR="$(resolve_evm_hex_from_bech32 "$del")" + if [[ -z "$POOL_EVM_ADDR" || "$POOL_EVM_ADDR" == "0x" ]]; then + echo "error: could not resolve EVM address for pool delegator $del" >&2 + exit 1 + fi + echo " pool_delegator_bech32 $del" + echo " pool_evm $POOL_EVM_ADDR" +} + +# withdraw() requires non-zero pool delegation; poll totalStaked until set. +wait_for_total_staked() { + local timeout="${1:-180}" + local start total + start="$(date +%s)" + log_flow_section "Wait for pool stake (totalStaked > 0)" \ + "withdraw() sizing needs the pool to have delegated stake; polling CommunityPool.totalStaked()." 
+ while true; do + total="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + if [[ "$total" =~ ^[0-9]+$ ]] && [[ "$total" != "0" ]]; then + echo " ok totalStaked=$total" + return 0 + fi + if (( $(date +%s) - start > timeout )); then + echo "error: totalStaked still zero after ${timeout}s" >&2 + exit 1 + fi + sleep 2 + done +} + +# Aggregate CommunityPool getters after deposits. +log_pool_snapshot() { + local tu pa ts spl + tu="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + pa="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "principalAssets()(uint256)")" + ts="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + spl="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + log_flow_section "On-chain pool snapshot (read-only)" \ + "CommunityPool aggregate state after deposits (and any prior activity)." + echo " totalUnits $tu (sum of LP units)" + echo " principalAssets $pa (principal backing)" + echo " totalStaked $ts (bond delegated via poolrebalancer)" + echo " stakeablePrincipalLedger $spl (principal available to stake)" +} + +print_contract_correctness_checks() { + local phase="$1" + local tu pa ts spl rr + tu="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + pa="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "principalAssets()(uint256)")" + ts="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + spl="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + rr="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "rewardReserve()(uint256)")" + + local principal_minus_staked="n/a" + local unit_price_ppm="n/a" + if [[ "$pa" =~ ^[0-9]+$ && "$ts" =~ ^[0-9]+$ ]]; then + principal_minus_staked=$((pa - ts)) + fi + if [[ "$tu" =~ ^[0-9]+$ && "$tu" != "0" && "$pa" =~ ^[0-9]+$ ]]; then + unit_price_ppm="$(python3 -c "print((int('$pa') * 
1000000) // int('$tu'))")" + fi + + log_flow_section "Contract correctness checks ($phase)" \ + "pool=$POOL_EVM_ADDR" \ + "totals: totalUnits=$tu principalAssets=$pa totalStaked=$ts stakeablePrincipalLedger=$spl rewardReserve=$rr" \ + "derived: principal_minus_staked=$principal_minus_staked unit_price_ppm=$unit_price_ppm (assets per unit * 1e6)" \ + "flow: deposits_ok=$DEPOSIT_OK/$USER_COUNT withdraw_submitted=$WITHDRAW_SUBMIT_SUCCESS claim_ok=$CLAIM_SUCCESS post_claimRewards_ok=$POST_CLAIM_REWARDS_SUCCESS" + + if [[ -n "$WITHDRAW_REQUEST_WINDOW_START" && -n "$WITHDRAW_REQUEST_WINDOW_END" ]]; then + echo " withdraw_request_window: [$WITHDRAW_REQUEST_WINDOW_START, $WITHDRAW_REQUEST_WINDOW_END)" + echo " mapped_requests=$WITHDRAW_REQUESTS_MAPPED claimed_verified=$WITHDRAW_REQUESTS_CLAIMED_VERIFIED" + fi +} + +log_contract_snapshot_for_batch() { + local phase="$1" + local batch_label="$2" + local tu pa ts spl rr + tu="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalUnits()(uint256)")" + pa="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "principalAssets()(uint256)")" + ts="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "totalStaked()(uint256)")" + spl="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "stakeablePrincipalLedger()(uint256)")" + rr="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "rewardReserve()(uint256)")" + echo " snapshot[$phase][$batch_label]: totalUnits=$tu principalAssets=$pa totalStaked=$ts stakeablePrincipalLedger=$spl rewardReserve=$rr" +} + +withdraw_request_claimed_flag() { + local rid="$1" + local raw + raw="$(cast call --rpc-url "$EVM_RPC" "$POOL_EVM_ADDR" \ + "withdrawRequests(uint256)(address,uint256,uint64,bool,bool)" "$rid" 2>/dev/null || true)" + printf '%s\n' "$raw" | awk 'NF{c++} c==4 {print $1; exit}' +} + +# Approve + deposit for dev0..dev(N-1). 
+run_deposits() { + local i pk name + log_flow_section "Deposits ($USER_COUNT accounts)" \ + "Per account: approve bond ERC20 on the bond precompile, then CommunityPool.deposit(amount). Amount wei: $DEPOSIT_AMOUNT_WEI." + if is_parallel_mode_active && (( DEPOSIT_CONCURRENCY > 1 )); then + local batch_start batch_end conc tmpdir + conc="$DEPOSIT_CONCURRENCY" + tmpdir="$(mktemp -d -t user_flow_dep.XXXXXX)" + for batch_start in $(seq 0 "$conc" $((USER_COUNT - 1))); do + batch_end=$((batch_start + conc - 1)) + (( batch_end >= USER_COUNT )) && batch_end=$((USER_COUNT - 1)) + local pids=() files=() + for i in $(seq "$batch_start" "$batch_end"); do + local f="$tmpdir/dep_${i}.out" + files+=("$f") + ( + name="dev${i}" + pk="$(dev_account_private_key_from_file "$name" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "FAIL|$name|missing private key" + exit 0 + fi + echo " -- $name: approve bond + deposit into pool" + if approve_and_deposit "$pk" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$DEPOSIT_AMOUNT_WEI" "$EVM_RPC"; then + echo "OK|$name|" + else + echo "FAIL|$name|deposit tx failed" + fi + ) >"$f" 2>&1 & + pids+=("$!") + done + for p in "${pids[@]}"; do wait "$p"; done + for f in "${files[@]}"; do + local r line status uname msg last_log + line="$(awk '/^(OK|FAIL)\|/{print; exit}' "$f" 2>/dev/null || true)" + status="$(printf '%s' "$line" | awk -F'|' '{print $1}')" + uname="$(printf '%s' "$line" | awk -F'|' '{print $2}')" + msg="$(printf '%s' "$line" | awk -F'|' '{print $3}')" + if [[ "$status" == "OK" ]]; then + DEPOSIT_OK=$((DEPOSIT_OK + 1)) + echo " ok" + else + DEPOSIT_FAIL=$((DEPOSIT_FAIL + 1)) + last_log="$(awk 'NF{last=$0} END{print last}' "$f" 2>/dev/null || true)" + if [[ -z "$uname" ]]; then + uname="$(awk '/-- dev[0-9]+:/{for(i=1;i<=NF;i++){if($i ~ /^dev[0-9]+:$/){gsub(":","",$i); print $i; exit}}}' "$f" 2>/dev/null || true)" + fi + [[ -z "$uname" ]] && uname="unknown" + if [[ -z "$msg" && -n "$last_log" ]]; then + msg="$last_log" + fi + echo 
"warning: deposit failed for $uname${msg:+ ($msg)}" >&2 + fi + done + sleep_ms "$BATCH_DELAY_MS" + done + rm -rf "$tmpdir" + if (( DEPOSIT_FAIL > 0 )) && [[ "$FAIL_FAST" == "1" ]]; then + echo "error: one or more deposits failed and FAIL_FAST=1" >&2 + exit 1 + fi + echo " summary: deposits_ok=$DEPOSIT_OK deposits_failed=$DEPOSIT_FAIL" + return 0 + fi + for i in $(seq 0 $((USER_COUNT - 1))); do + name="dev${i}" + pk="$(dev_account_private_key_from_file "$name" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "error: missing $name in $DEV_ACCOUNTS_FILE (need USER_COUNT <= generated dev accounts)" >&2 + exit 1 + fi + echo " -- $name: approve bond + deposit into pool" + if approve_and_deposit "$pk" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$DEPOSIT_AMOUNT_WEI" "$EVM_RPC"; then + DEPOSIT_OK=$((DEPOSIT_OK + 1)) + echo " ok" + else + DEPOSIT_FAIL=$((DEPOSIT_FAIL + 1)) + echo "warning: deposit failed for $name" >&2 + if [[ "$FAIL_FAST" == "1" ]]; then + exit 1 + fi + fi + if [[ "$DEPOSIT_INTERVAL_SECS" =~ ^[0-9]+$ ]] && (( DEPOSIT_INTERVAL_SECS > 0 )) && (( i < USER_COUNT - 1 )); then + sleep "$DEPOSIT_INTERVAL_SECS" + fi + done + echo " summary: deposits_ok=$DEPOSIT_OK deposits_failed=$DEPOSIT_FAIL" +} + +resolve_pool_owner_pk() { + if [[ -n "${POOL_OWNER_PK:-}" ]]; then + return 0 + fi + POOL_OWNER_PK="$(dev_account_private_key_from_file "dev0" "$DEV_ACCOUNTS_FILE" || true)" +} + +# Latest EVM block header time as unix seconds (for maturity vs wall clock). +block_timestamp_unix() { + cast block latest --rpc-url "$EVM_RPC" --json 2>/dev/null | jq -r '.timestamp // empty' | python3 -c " +import sys +s=sys.stdin.read().strip() +if not s: + print(0) +elif s.startswith('0x'): + print(int(s,16)) +else: + print(int(s)) +" +} + +# withdrawRequests(requestId) maturity field as unix seconds (parsed from cast output). 
+withdraw_request_maturity_unix() { + local rid="$1" + cast call --rpc-url "$EVM_RPC" "$POOL_EVM_ADDR" \ + "withdrawRequests(uint256)(address,uint256,uint64,bool,bool)" "$rid" 2>/dev/null \ + | python3 -c " +import sys +lines=[l.strip() for l in sys.stdin if l.strip()] +if len(lines) < 3: + print(0) + sys.exit(0) +m = lines[2].split()[0] +if m.startswith('0x'): + print(int(m, 16)) +else: + print(int(m.split('[')[0].strip())) +" +} + +# Optional harvest / sleep / pre-claim before withdraw loop. +run_pre_withdraw_reward_sync() { + resolve_pool_owner_pk + log_flow_section "Pre-withdraw (optional reward sync)" \ + "By default we do not call claimRewards() here: withdraw() pulls pending rewards via _claimPendingRewards. Enable PRE_WITHDRAW_CLAIM_REWARDS=1 to force claimRewards() before withdraw." + if [[ "${PRE_WITHDRAW_HARVEST:-0}" == "1" && -n "${POOL_OWNER_PK:-}" ]]; then + echo " -- manual harvest() (debug; module EndBlock also harvests)" + if ! cast_send_expect_success "$EVM_RPC" "$POOL_OWNER_PK" "$POOL_EVM_ADDR" "harvest()"; then + echo "warning: manual harvest() reverted; continuing." >&2 + fi + elif [[ "${PRE_WITHDRAW_HARVEST:-0}" == "1" ]]; then + echo "warning: PRE_WITHDRAW_HARVEST=1 but no POOL_OWNER_PK — skipping harvest" >&2 + fi + if [[ "${REWARD_SYNC_WAIT_SECS:-0}" =~ ^[0-9]+$ ]] && (( REWARD_SYNC_WAIT_SECS > 0 )); then + echo " -- sleeping ${REWARD_SYNC_WAIT_SECS}s (REWARD_SYNC_WAIT_SECS)" + sleep "$REWARD_SYNC_WAIT_SECS" + fi + if [[ "${PRE_WITHDRAW_CLAIM_REWARDS:-0}" != "1" ]]; then + echo " skipping standalone claimRewards(); withdraw() will settle pending pool rewards." 
+ return 0 + fi + local i name pk + for i in $(seq 0 $((WITHDRAW_USERS - 1))); do + name="dev${i}" + pk="$(dev_account_private_key_from_file "$name" "$DEV_ACCOUNTS_FILE" || true)" + [[ -z "$pk" ]] && continue + echo " -- PRE_WITHDRAW_CLAIM_REWARDS: claimRewards() for $name" + cast_send_expect_success "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" "claimRewards()" || { + echo "error: claimRewards failed for $name" >&2 + exit 1 + } + done +} + +# Spin until block time >= maturity or wall-clock timeout. +wait_until_mature_or_timeout() { + local rid="$1" + local max_sec="${2:-240}" + local start mt bt + start="$(date +%s)" + mt="$(withdraw_request_maturity_unix "$rid")" + if [[ "$mt" == "0" ]]; then + sleep 3 + mt="$(withdraw_request_maturity_unix "$rid")" + fi + if [[ "$mt" == "0" ]]; then + echo "error: withdrawRequests($rid) has maturity 0 (withdraw tx may have reverted)" >&2 + return 1 + fi + echo " requestId=$rid maturityUnix=$mt (claimWithdraw allowed when latest block time >= this)" + echo " polling until block time catches up (max ${max_sec}s wall clock)..." + while true; do + bt="$(block_timestamp_unix)" + if [[ "$bt" =~ ^[0-9]+$ ]] && [[ "$mt" =~ ^[0-9]+$ ]] && (( mt > 0 && bt >= mt )); then + echo " maturity reached: latest blockTime=$bt >= maturityTime=$mt" + return 0 + fi + if (( $(date +%s) - start > max_sec )); then + echo "error: timeout waiting for maturity (requestId=$rid blockTime=$bt maturity=$mt)" >&2 + return 1 + fi + sleep 2 + done +} + +# min(units, max(1, units * bp / 10000)) — basis points fraction of LP units to exit. +compute_withdraw_units() { + local units="$1" + local bp="$2" + local out + if ! 
[[ "$units" =~ ^[0-9]+$ ]] || [[ "$units" == "0" ]]; then + echo "0" + return 0 + fi + if command -v python3 >/dev/null 2>&1; then + out="$(python3 -c "u=int('$units'); b=int('$bp'); print(min(u, max(1, u * b // 10000)))" 2>/dev/null || echo "0")" + else + out=$(( units * bp / 10000 )) + if (( out < 1 && units > 0 )); then + out=1 + fi + if (( out > units )); then + out=$units + fi + fi + printf '%s' "$out" +} + +run_one_withdraw_submit() { + local i="$1" + local name pk addr units wunits wsubmit=0 withdraw_retries_used=0 + name="dev${i}" + pk="$(dev_account_private_key_from_file "$name" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "FAIL|$name||0|missing private key" + return 0 + fi + addr="$(cast wallet address --private-key "$pk")" + units="$(pool_evm_call_uint256_args "$POOL_EVM_ADDR" "$EVM_RPC" "unitsOf(address)(uint256)" "$addr")" + if [[ ! "$units" =~ ^[0-9]+$ ]] || [[ "$units" == "n/a" ]]; then + echo "FAIL|$name||0|could not read unitsOf" + return 0 + fi + wunits="$(compute_withdraw_units "$units" "$WITHDRAW_FRACTION_BP")" + if [[ "$wunits" == "0" ]]; then + echo "SKIP|$name||0|withdraw units computed to 0" + return 0 + fi + echo " -- $name: units=$units withdrawUnits=$wunits" + log_user_withdraw_snapshot "$EVM_RPC" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$addr" "$name" "before_withdraw" + for _try in $(seq 1 "$WITHDRAW_SUBMIT_RETRIES"); do + if CAST_SEND_GAS_LIMIT="${WITHDRAW_CLAIM_GAS_LIMIT}" cast_send_expect_success "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" "withdraw(uint256)" "$wunits"; then + wsubmit=1 + withdraw_retries_used=$((_try - 1)) + break + fi + echo " withdraw attempt $_try/$WITHDRAW_SUBMIT_RETRIES reverted; sleep ${WITHDRAW_RETRY_SLEEP_SECS}s then retry" + sleep "${WITHDRAW_RETRY_SLEEP_SECS}" + done + if [[ "$wsubmit" == "1" ]]; then + log_user_withdraw_snapshot "$EVM_RPC" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$addr" "$name" "after_withdraw" + echo "OK|$name||$withdraw_retries_used|" + else + echo 
"FAIL|$name||$withdraw_retries_used|withdraw failed after retries" + fi +} + +run_one_claim_withdraw() { + local rid="$1" name="$2" + local pk addr attempt=0 claim_ok=0 + pk="$(dev_account_private_key_from_file "$name" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "FAIL|$name|$rid|0|missing private key" + return 0 + fi + log_flow_section "claimWithdraw for requestId=$rid ($name)" \ + "After maturity, claimWithdraw returns principal + bond to the user (may retry if pool liquidity is still settling)." + if ! wait_until_mature_or_timeout "$rid" 300; then + echo "FAIL|$name|$rid|0|maturity wait failed" + return 0 + fi + addr="$(cast wallet address --private-key "$pk")" + log_user_withdraw_snapshot "$EVM_RPC" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$addr" "$name" "before_claimWithdraw" + echo " -- sending claimWithdraw(uint256) requestId=$rid" + while (( attempt < CLAIM_POLL_MAX_ATTEMPTS )); do + if CAST_SEND_GAS_LIMIT="${WITHDRAW_CLAIM_GAS_LIMIT}" cast_send_expect_success "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" "claimWithdraw(uint256)" "$rid"; then + echo " claim ok requestId=$rid" + log_user_withdraw_snapshot "$EVM_RPC" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$addr" "$name" "after_claimWithdraw" + claim_ok=1 + break + fi + attempt=$((attempt + 1)) + echo " claim retry $attempt/$CLAIM_POLL_MAX_ATTEMPTS (insufficient liquid or still settling)..." 
+ sleep "$CLAIM_POLL_INTERVAL_SECS" + done + if (( claim_ok == 1 )); then + echo "OK|$name|$rid|$attempt|" + else + echo "FAIL|$name|$rid|$attempt|claim failed after retries" + fi +} + +run_one_post_claim_rewards() { + local i="$1" + local name pk addr lb la delta + name="dev${i}" + pk="$(dev_account_private_key_from_file "$name" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "SKIP|$name||missing private key" + return 0 + fi + addr="$(cast wallet address --private-key "$pk")" + lb="$(normalize_cast_balance_wei "$(cast balance --rpc-url "$EVM_RPC" "$addr" 2>/dev/null || true)")" + echo " -- $name: claimRewards() (liquid_native before=$lb wei)" + log_user_withdraw_snapshot "$EVM_RPC" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$addr" "$name" "before_claimRewards_postwithdraw" + if ! cast_send_expect_success "$EVM_RPC" "$pk" "$POOL_EVM_ADDR" "claimRewards()"; then + echo "FAIL|$name||claimRewards failed" + return 0 + fi + la="$(normalize_cast_balance_wei "$(cast balance --rpc-url "$EVM_RPC" "$addr" 2>/dev/null || true)")" + log_user_withdraw_snapshot "$EVM_RPC" "$POOL_EVM_ADDR" "$BOND_PRECOMPILE" "$addr" "$name" "after_claimRewards_postwithdraw" + if [[ "$lb" =~ ^[0-9]+$ && "$la" =~ ^[0-9]+$ ]]; then + delta="$(python3 -c "print(int('$la') - int('$lb'))")" + echo " liquid_native delta (after - before) = $delta wei" + else + echo " liquid_native delta = n/a (could not parse cast balance)" + fi + echo "OK|$name||" +} + +# Withdraw queue → sleep unbonding → claimWithdraw each captured requestId in order. +run_withdraw_and_claim() { + if [[ "$WITHDRAW_USERS" == "0" ]]; then + log_flow_section "Withdraw / claim (skipped)" "WITHDRAW_USERS=0 — deposits only." 
+ return 0 + fi + wait_for_total_staked 240 + + run_pre_withdraw_reward_sync + + local i pk name addr units wunits rid ub_sec wait_sec + local rid_window_start rid_window_end + local stress_mode + stress_mode=0 + if is_stress_mode_active; then + stress_mode=1 + fi + ub_sec="$(parse_unbonding_seconds "$(evmd query staking params --node "$NODE_RPC" -o json | jq -r '.params.unbonding_time // "30s"')")" + log_flow_section "Submit withdraw requests (first $WITHDRAW_USERS users)" \ + "For each user: read unitsOf, compute withdrawUnits = units * WITHDRAW_FRACTION_BP / 10000, call withdraw(withdrawUnits). Snapshots show before/after each tx." \ + "WITHDRAW_FRACTION_BP=$WITHDRAW_FRACTION_BP (basis points; 1000 = 10%). Staking unbonding_time ~${ub_sec}s; we then sleep unbonding+${UNBONDING_WAIT_BUFFER_SECS}s before maturity wait." + + declare -a RIDS=() + declare -a RID_OWNERS=() + declare -a WITHDRAW_USER_NAMES=() + declare -a WITHDRAW_USER_ADDRS=() + + for i in $(seq 0 $((WITHDRAW_USERS - 1))); do + name="dev${i}" + pk="$(dev_account_private_key_from_file "$name" "$DEV_ACCOUNTS_FILE" || true)" + if [[ -z "$pk" ]]; then + echo "error: missing $name for withdraw" >&2 + exit 1 + fi + WITHDRAW_USER_NAMES+=("$name") + WITHDRAW_USER_ADDRS+=("$(cast wallet address --private-key "$pk")") + done + rid_window_start="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "nextWithdrawRequestId()(uint256)")" + WITHDRAW_REQUEST_WINDOW_START="$rid_window_start" + + if is_parallel_mode_active && (( WITHDRAW_CONCURRENCY > 1 )); then + local batch_start batch_end conc tmpdir + local total_withdraw_batches + conc="$WITHDRAW_CONCURRENCY" + tmpdir="$(mktemp -d -t user_flow_wsub.XXXXXX)" + total_withdraw_batches=$(( (WITHDRAW_USERS + conc - 1) / conc )) + for batch_start in $(seq 0 "$conc" $((WITHDRAW_USERS - 1))); do + batch_end=$((batch_start + conc - 1)) + (( batch_end >= WITHDRAW_USERS )) && batch_end=$((WITHDRAW_USERS - 1)) + local batch_num=$((batch_start / conc + 1)) + echo " -- withdraw 
submit batch ${batch_num}/${total_withdraw_batches}: users dev${batch_start}..dev${batch_end}" + local pids=() files=() + for i in $(seq "$batch_start" "$batch_end"); do + local f="$tmpdir/wsub_${i}.out" + files+=("$f") + ( run_one_withdraw_submit "$i" ) >"$f" 2>&1 & + pids+=("$!") + done + for p in "${pids[@]}"; do wait "$p"; done + for f in "${files[@]}"; do + local line status uname rid_out retries msg + line="$(awk '/^(OK|FAIL|SKIP)\|/{print; exit}' "$f" 2>/dev/null || true)" + status="$(printf '%s' "$line" | awk -F'|' '{print $1}')" + uname="$(printf '%s' "$line" | awk -F'|' '{print $2}')" + rid_out="$(printf '%s' "$line" | awk -F'|' '{print $3}')" + retries="$(printf '%s' "$line" | awk -F'|' '{print $4}')" + msg="$(printf '%s' "$line" | awk -F'|' '{print $5}')" + [[ "$status" == "SKIP" ]] && continue + WITHDRAW_SUBMIT_ATTEMPTED=$((WITHDRAW_SUBMIT_ATTEMPTED + 1)) + [[ "$retries" =~ ^[0-9]+$ ]] || retries=0 + WITHDRAW_SUBMIT_RETRIES_TOTAL=$((WITHDRAW_SUBMIT_RETRIES_TOTAL + retries)) + if [[ "$status" == "OK" ]]; then + WITHDRAW_SUBMIT_SUCCESS=$((WITHDRAW_SUBMIT_SUCCESS + 1)) + : + else + WITHDRAW_SUBMIT_FAILED=$((WITHDRAW_SUBMIT_FAILED + 1)) + echo "error: withdraw failed for $uname${msg:+ ($msg)}" >&2 + if (( stress_mode != 1 )); then + rm -rf "$tmpdir" + exit 1 + fi + fi + done + echo " -- withdraw submit batch ${batch_num}/${total_withdraw_batches} complete: attempted=$WITHDRAW_SUBMIT_ATTEMPTED success=$WITHDRAW_SUBMIT_SUCCESS failed=$WITHDRAW_SUBMIT_FAILED" + log_contract_snapshot_for_batch "withdraw_submit" "${batch_num}/${total_withdraw_batches}" + sleep_ms "$BATCH_DELAY_MS" + done + rm -rf "$tmpdir" + else + for i in $(seq 0 $((WITHDRAW_USERS - 1))); do + line="$(run_one_withdraw_submit "$i" | awk '/^(OK|FAIL|SKIP)\|/{x=$0} END{print x}')" + status="$(printf '%s' "$line" | awk -F'|' '{print $1}')" + name="$(printf '%s' "$line" | awk -F'|' '{print $2}')" + rid="$(printf '%s' "$line" | awk -F'|' '{print $3}')" + retries="$(printf '%s' "$line" | awk -F'|' 
'{print $4}')" + msg="$(printf '%s' "$line" | awk -F'|' '{print $5}')" + [[ "$status" == "SKIP" ]] && continue + WITHDRAW_SUBMIT_ATTEMPTED=$((WITHDRAW_SUBMIT_ATTEMPTED + 1)) + [[ "$retries" =~ ^[0-9]+$ ]] || retries=0 + WITHDRAW_SUBMIT_RETRIES_TOTAL=$((WITHDRAW_SUBMIT_RETRIES_TOTAL + retries)) + if [[ "$status" != "OK" ]]; then + WITHDRAW_SUBMIT_FAILED=$((WITHDRAW_SUBMIT_FAILED + 1)) + echo "error: withdraw failed for $name${msg:+ ($msg)}" >&2 + if (( stress_mode == 1 )); then + echo "warning: stress mode enabled, continuing after withdraw failure for $name" >&2 + continue + fi + exit 1 + fi + WITHDRAW_SUBMIT_SUCCESS=$((WITHDRAW_SUBMIT_SUCCESS + 1)) + : + done + fi + + rid_window_end="$(pool_evm_call_uint256 "$POOL_EVM_ADDR" "$EVM_RPC" "nextWithdrawRequestId()(uint256)")" + WITHDRAW_REQUEST_WINDOW_END="$rid_window_end" + if [[ "$rid_window_start" =~ ^[0-9]+$ && "$rid_window_end" =~ ^[0-9]+$ ]] && (( rid_window_end > rid_window_start )); then + for rid in $(seq "$rid_window_start" $((rid_window_end - 1))); do + local owner_line owner_addr owner_name + owner_line="$(cast call --rpc-url "$EVM_RPC" "$POOL_EVM_ADDR" "withdrawRequests(uint256)(address,uint256,uint64,bool,bool)" "$rid" 2>/dev/null | awk 'NF{print; exit}' || true)" + owner_addr="$(printf '%s' "$owner_line" | awk '{print $1}')" + owner_name="" + local owner_addr_lc candidate_addr_lc + owner_addr_lc="$(printf '%s' "$owner_addr" | tr '[:upper:]' '[:lower:]')" + for idx in "${!WITHDRAW_USER_ADDRS[@]}"; do + candidate_addr_lc="$(printf '%s' "${WITHDRAW_USER_ADDRS[$idx]}" | tr '[:upper:]' '[:lower:]')" + if [[ "$candidate_addr_lc" == "$owner_addr_lc" ]]; then + owner_name="${WITHDRAW_USER_NAMES[$idx]}" + break + fi + done + if [[ -n "$owner_name" ]]; then + RIDS+=("$rid") + RID_OWNERS+=("$owner_name") + fi + done + fi + + WITHDRAW_REQUESTS_MAPPED="${#RIDS[@]}" + echo " mapped withdraw requests for claims: ${#RIDS[@]} (submitted_success=$WITHDRAW_SUBMIT_SUCCESS)" + if (( ${#RIDS[@]} == 0 )); then + echo "warning: 
no withdraw txs executed" >&2 + return 0 + fi + + wait_sec=$(( ub_sec + UNBONDING_WAIT_BUFFER_SECS )) + log_flow_section "Wait for unbonding (wall clock)" \ + "Sleeping ${wait_sec}s = staking unbonding_time (${ub_sec}s) + UNBONDING_WAIT_BUFFER_SECS (${UNBONDING_WAIT_BUFFER_SECS}s). Then we wait for each withdraw request’s on-chain maturity time." + sleep "$wait_sec" + + if is_parallel_mode_active && (( CLAIM_CONCURRENCY > 1 )); then + local cconc ctmpdir cstart cend + local total_claim_batches + cconc="$CLAIM_CONCURRENCY" + ctmpdir="$(mktemp -d -t user_flow_claim.XXXXXX)" + local total_claims="${#RIDS[@]}" + total_claim_batches=$(( (total_claims + cconc - 1) / cconc )) + for cstart in $(seq 0 "$cconc" $((total_claims - 1))); do + cend=$((cstart + cconc - 1)) + (( cend >= total_claims )) && cend=$((total_claims - 1)) + local cbatch_num=$((cstart / cconc + 1)) + echo " -- claimWithdraw batch ${cbatch_num}/${total_claim_batches}: requests index ${cstart}..${cend}" + local pids=() files=() + for idx in $(seq "$cstart" "$cend"); do + rid="${RIDS[$idx]}" + name="${RID_OWNERS[$idx]}" + local f="$ctmpdir/claim_${idx}.out" + files+=("$f") + ( run_one_claim_withdraw "$rid" "$name" ) >"$f" 2>&1 & + pids+=("$!") + done + for p in "${pids[@]}"; do wait "$p"; done + for f in "${files[@]}"; do + local line status retries msg rid_out + line="$(awk '/^(OK|FAIL)\|/{print; exit}' "$f" 2>/dev/null || true)" + status="$(printf '%s' "$line" | awk -F'|' '{print $1}')" + rid_out="$(printf '%s' "$line" | awk -F'|' '{print $3}')" + retries="$(printf '%s' "$line" | awk -F'|' '{print $4}')" + msg="$(printf '%s' "$line" | awk -F'|' '{print $5}')" + CLAIM_ATTEMPTED=$((CLAIM_ATTEMPTED + 1)) + [[ "$retries" =~ ^[0-9]+$ ]] || retries=0 + CLAIM_RETRIES_TOTAL=$((CLAIM_RETRIES_TOTAL + retries)) + if [[ "$status" == "OK" ]]; then + CLAIM_SUCCESS=$((CLAIM_SUCCESS + 1)) + else + CLAIM_FAILED=$((CLAIM_FAILED + 1)) + echo "error: claim failed for requestId=$rid_out${msg:+ ($msg)}" >&2 + if (( stress_mode 
!= 1 )); then + rm -rf "$ctmpdir" + exit 1 + fi + fi + done + echo " -- claimWithdraw batch ${cbatch_num}/${total_claim_batches} complete: attempted=$CLAIM_ATTEMPTED success=$CLAIM_SUCCESS failed=$CLAIM_FAILED" + log_contract_snapshot_for_batch "claimWithdraw" "${cbatch_num}/${total_claim_batches}" + sleep_ms "$BATCH_DELAY_MS" + done + rm -rf "$ctmpdir" + else + for idx in "${!RIDS[@]}"; do + rid="${RIDS[$idx]}" + name="${RID_OWNERS[$idx]}" + line="$(run_one_claim_withdraw "$rid" "$name" | awk '/^(OK|FAIL)\|/{x=$0} END{print x}')" + status="$(printf '%s' "$line" | awk -F'|' '{print $1}')" + retries="$(printf '%s' "$line" | awk -F'|' '{print $4}')" + msg="$(printf '%s' "$line" | awk -F'|' '{print $5}')" + CLAIM_ATTEMPTED=$((CLAIM_ATTEMPTED + 1)) + [[ "$retries" =~ ^[0-9]+$ ]] || retries=0 + CLAIM_RETRIES_TOTAL=$((CLAIM_RETRIES_TOTAL + retries)) + if [[ "$status" != "OK" ]]; then + CLAIM_FAILED=$((CLAIM_FAILED + 1)) + echo "error: claim failed for requestId=$rid${msg:+ ($msg)}" >&2 + if (( stress_mode == 1 )); then + echo "warning: stress mode enabled, continuing after claim failure for requestId=$rid" >&2 + continue + fi + exit 1 + fi + CLAIM_SUCCESS=$((CLAIM_SUCCESS + 1)) + done + fi + + local verified_claimed=0 + for rid in "${RIDS[@]}"; do + local claimed_flag + claimed_flag="$(withdraw_request_claimed_flag "$rid")" + if [[ "$claimed_flag" == "true" ]]; then + verified_claimed=$((verified_claimed + 1)) + fi + done + WITHDRAW_REQUESTS_CLAIMED_VERIFIED="$verified_claimed" + echo " verified claim flags on-chain: claimed=$verified_claimed/${#RIDS[@]}" +} + +# After claimWithdraws: optional extra claimRewards per user (reward index path vs withdraw-embedded). 
+run_post_claimwithdraw_claim_rewards() { + if [[ "${POST_CLAIMWITHDRAW_CLAIM_REWARDS:-0}" != "1" ]]; then + return 0 + fi + if [[ "$WITHDRAW_USERS" == "0" ]]; then + echo "POST_CLAIMWITHDRAW_CLAIM_REWARDS=1 but WITHDRAW_USERS=0 — skipping (no claimWithdraw phase)" >&2 + return 0 + fi + + local n="${POST_CLAIMWITHDRAW_USERS:-}" + if [[ -z "$n" ]]; then + n="$USER_COUNT" + fi + if ! [[ "$n" =~ ^[0-9]+$ ]] || (( n < 1 )); then + echo "error: POST_CLAIMWITHDRAW_USERS must be a positive integer" >&2 + exit 1 + fi + + local h0 h1 + h0="$(cast block-number --rpc-url "$EVM_RPC" 2>/dev/null || echo "?")" + log_flow_section "Standalone claimRewards() (post claimWithdraw)" \ + "Optional path: after all claimWithdraws, wait POST_CLAIMWITHDRAW_WAIT_SECS=(${POST_CLAIMWITHDRAW_WAIT_SECS}s) so more blocks accrue rewards, then claimRewards() per user." \ + "This exercises reward accounting vs rewards already folded into withdraw(). First block height ≈ $h0." + sleep "${POST_CLAIMWITHDRAW_WAIT_SECS}" + h1="$(cast block-number --rpc-url "$EVM_RPC" 2>/dev/null || echo "?")" + echo " after wait: block≈$h1 — claiming for $n user(s) (dev0..dev$((n - 1)))" + + local i + if is_parallel_mode_active && (( CLAIM_REWARDS_CONCURRENCY > 1 )); then + local conc tmpdir batch_start batch_end + local total_rewards_batches + conc="$CLAIM_REWARDS_CONCURRENCY" + tmpdir="$(mktemp -d -t user_flow_creward.XXXXXX)" + total_rewards_batches=$(( (n + conc - 1) / conc )) + for batch_start in $(seq 0 "$conc" $((n - 1))); do + batch_end=$((batch_start + conc - 1)) + (( batch_end >= n )) && batch_end=$((n - 1)) + local rbatch_num=$((batch_start / conc + 1)) + echo " -- claimRewards batch ${rbatch_num}/${total_rewards_batches}: users dev${batch_start}..dev${batch_end}" + local pids=() files=() + for i in $(seq "$batch_start" "$batch_end"); do + local f="$tmpdir/creward_${i}.out" + files+=("$f") + ( run_one_post_claim_rewards "$i" ) >"$f" 2>&1 & + pids+=("$!") + done + for p in "${pids[@]}"; do wait "$p"; done + 
for f in "${files[@]}"; do + local line status uname msg + line="$(awk '/^(OK|FAIL|SKIP)\|/{print; exit}' "$f" 2>/dev/null || true)" + status="$(printf '%s' "$line" | awk -F'|' '{print $1}')" + uname="$(printf '%s' "$line" | awk -F'|' '{print $2}')" + msg="$(printf '%s' "$line" | awk -F'|' '{print $4}')" + [[ "$status" == "SKIP" ]] && continue + POST_CLAIM_REWARDS_ATTEMPTED=$((POST_CLAIM_REWARDS_ATTEMPTED + 1)) + if [[ "$status" == "OK" ]]; then + POST_CLAIM_REWARDS_SUCCESS=$((POST_CLAIM_REWARDS_SUCCESS + 1)) + else + POST_CLAIM_REWARDS_FAILED=$((POST_CLAIM_REWARDS_FAILED + 1)) + echo "error: claimRewards failed for $uname${msg:+ ($msg)}" >&2 + if ! is_stress_mode_active; then + rm -rf "$tmpdir" + exit 1 + fi + fi + done + echo " -- claimRewards batch ${rbatch_num}/${total_rewards_batches} complete: attempted=$POST_CLAIM_REWARDS_ATTEMPTED success=$POST_CLAIM_REWARDS_SUCCESS failed=$POST_CLAIM_REWARDS_FAILED" + log_contract_snapshot_for_batch "claimRewards" "${rbatch_num}/${total_rewards_batches}" + sleep_ms "$BATCH_DELAY_MS" + done + rm -rf "$tmpdir" + else + for i in $(seq 0 $((n - 1))); do + local line status name msg + line="$(run_one_post_claim_rewards "$i" | awk '/^(OK|FAIL|SKIP)\|/{x=$0} END{print x}')" + status="$(printf '%s' "$line" | awk -F'|' '{print $1}')" + name="$(printf '%s' "$line" | awk -F'|' '{print $2}')" + msg="$(printf '%s' "$line" | awk -F'|' '{print $4}')" + [[ "$status" == "SKIP" ]] && continue + POST_CLAIM_REWARDS_ATTEMPTED=$((POST_CLAIM_REWARDS_ATTEMPTED + 1)) + if [[ "$status" != "OK" ]]; then + echo "error: claimRewards failed for $name${msg:+ ($msg)}" >&2 + POST_CLAIM_REWARDS_FAILED=$((POST_CLAIM_REWARDS_FAILED + 1)) + if is_stress_mode_active; then + echo "warning: stress mode enabled, continuing after claimRewards failure for $name" >&2 + continue + fi + exit 1 + fi + POST_CLAIM_REWARDS_SUCCESS=$((POST_CLAIM_REWARDS_SUCCESS + 1)) + done + fi +} + +print_stress_summary() { + local runtime_sec withdraw_avg_retries claim_avg_retries 
claim_completion_rate + runtime_sec=$((RUN_END_TS - RUN_START_TS)) + withdraw_avg_retries="$(safe_avg_2dp "$WITHDRAW_SUBMIT_RETRIES_TOTAL" "$WITHDRAW_SUBMIT_ATTEMPTED")" + claim_avg_retries="$(safe_avg_2dp "$CLAIM_RETRIES_TOTAL" "$CLAIM_ATTEMPTED")" + claim_completion_rate="$(safe_ratio_percent_2dp "$CLAIM_SUCCESS" "$WITHDRAW_SUBMIT_SUCCESS")" + + log_flow_section "Stress profile summary" \ + "profile=$USER_FLOW_STRESS_PROFILE mode=$USER_FLOW_MODE runtime_sec=$runtime_sec" \ + "deposits_ok=$DEPOSIT_OK deposits_failed=$DEPOSIT_FAIL" \ + "withdraw_submit_attempted=$WITHDRAW_SUBMIT_ATTEMPTED succeeded=$WITHDRAW_SUBMIT_SUCCESS failed=$WITHDRAW_SUBMIT_FAILED avg_retries=$withdraw_avg_retries" \ + "claim_attempted=$CLAIM_ATTEMPTED succeeded=$CLAIM_SUCCESS failed=$CLAIM_FAILED avg_retries=$claim_avg_retries completion_rate=${claim_completion_rate}% (claims_succeeded/withdraws_submitted)" \ + "post_claimRewards_attempted=$POST_CLAIM_REWARDS_ATTEMPTED succeeded=$POST_CLAIM_REWARDS_SUCCESS failed=$POST_CLAIM_REWARDS_FAILED" +} + +parse_cli() { + while [[ $# -gt 0 ]]; do + case "$1" in + -h|--help) usage; exit 0 ;; + --deposits-only) WITHDRAW_USERS=0; shift ;; + *) echo "unknown arg: $1" >&2; usage; exit 1 ;; + esac + done +} + +apply_stress_profile_overrides() { + case "${USER_FLOW_STRESS_PROFILE:-}" in + "" ) + return 0 + ;; + 100users|stress100) + # Profile defaults apply only when users did not explicitly set env values. 
+ if (( USER_COUNT_SET_BY_ENV == 0 )); then USER_COUNT=100; fi + if (( WITHDRAW_USERS_SET_BY_ENV == 0 )); then WITHDRAW_USERS=30; fi + if (( FAIL_FAST_SET_BY_ENV == 0 )); then FAIL_FAST=0; fi + if (( DEPOSIT_INTERVAL_SECS_SET_BY_ENV == 0 )); then DEPOSIT_INTERVAL_SECS=0; fi + if (( WITHDRAW_SUBMIT_RETRIES_SET_BY_ENV == 0 )); then WITHDRAW_SUBMIT_RETRIES=40; fi + if (( CLAIM_POLL_MAX_ATTEMPTS_SET_BY_ENV == 0 )); then CLAIM_POLL_MAX_ATTEMPTS=180; fi + if (( USER_FLOW_MODE_SET_BY_ENV == 0 )); then USER_FLOW_MODE=parallel; fi + if (( DEPOSIT_CONCURRENCY_SET_BY_ENV == 0 )); then DEPOSIT_CONCURRENCY=10; fi + if (( WITHDRAW_CONCURRENCY_SET_BY_ENV == 0 )); then WITHDRAW_CONCURRENCY=8; fi + if (( CLAIM_CONCURRENCY_SET_BY_ENV == 0 )); then CLAIM_CONCURRENCY=5; fi + if (( CLAIM_REWARDS_CONCURRENCY_SET_BY_ENV == 0 )); then CLAIM_REWARDS_CONCURRENCY=12; fi + if (( BATCH_DELAY_MS_SET_BY_ENV == 0 )); then BATCH_DELAY_MS=100; fi + # Reduce RPC mempool/nonce contention for high-concurrency stress runs. + if [[ -z "${CAST_SEND_RESILIENT_MODE+x}" ]]; then CAST_SEND_RESILIENT_MODE=true; fi + if [[ -z "${DEPOSIT_GAS_ESCALATION+x}" ]]; then DEPOSIT_GAS_ESCALATION=true; fi + ;; + *) + echo "error: unknown USER_FLOW_STRESS_PROFILE=$USER_FLOW_STRESS_PROFILE (expected: 100users|stress100)" >&2 + exit 1 + ;; + esac +} + +validate_flow_knobs() { + if [[ "$USER_FLOW_MODE" != "serial" && "$USER_FLOW_MODE" != "parallel" ]]; then + echo "error: USER_FLOW_MODE must be serial or parallel (got: $USER_FLOW_MODE)" >&2 + exit 1 + fi + for v in DEPOSIT_CONCURRENCY WITHDRAW_CONCURRENCY CLAIM_CONCURRENCY CLAIM_REWARDS_CONCURRENCY; do + if ! [[ "${!v}" =~ ^[0-9]+$ ]] || (( ${!v} < 1 )); then + echo "error: $v must be a positive integer (got: ${!v})" >&2 + exit 1 + fi + done + if ! 
[[ "$BATCH_DELAY_MS" =~ ^[0-9]+$ ]]; then + echo "error: BATCH_DELAY_MS must be a non-negative integer (got: $BATCH_DELAY_MS)" >&2 + exit 1 + fi +} + +main() { + RUN_START_TS="$(date +%s)" + parse_cli "$@" + apply_stress_profile_overrides + validate_flow_knobs + require_bin jq + require_bin curl + require_bin evmd + require_bin cast + + if [[ ! -f "$DEV_ACCOUNTS_FILE" ]]; then + echo "error: DEV_ACCOUNTS_FILE not found: $DEV_ACCOUNTS_FILE" >&2 + echo "hint: generate dev accounts via multi_node_startup / rebalance_scenario_runner" >&2 + exit 1 + fi + log_flow_section "Preflight" \ + "USER_COUNT=$USER_COUNT WITHDRAW_USERS=$WITHDRAW_USERS DEV_ACCOUNTS_FILE=$DEV_ACCOUNTS_FILE" \ + "Existing dev accounts in file: $(count_dev_accounts_in_file "$DEV_ACCOUNTS_FILE")" + # Needed before auto-provisioning new accounts, which sends funding txs. + ensure_evm_rpc_ready + normalize_user_counts_for_available_accounts + + resolve_pool_evm_addr + + log_flow_section "Run summary" \ + "EVM_RPC=$EVM_RPC NODE_RPC=$NODE_RPC USER_COUNT=$USER_COUNT WITHDRAW_USERS=$WITHDRAW_USERS POST_CLAIMWITHDRAW_CLAIM_REWARDS=${POST_CLAIMWITHDRAW_CLAIM_REWARDS:-0}" \ + "USER_FLOW_STRESS_PROFILE=${USER_FLOW_STRESS_PROFILE:-none} USER_FLOW_MODE=$USER_FLOW_MODE FAIL_FAST=$FAIL_FAST DEPOSIT_INTERVAL_SECS=$DEPOSIT_INTERVAL_SECS WITHDRAW_SUBMIT_RETRIES=$WITHDRAW_SUBMIT_RETRIES CLAIM_POLL_MAX_ATTEMPTS=$CLAIM_POLL_MAX_ATTEMPTS" \ + "DEPOSIT_CONCURRENCY=$DEPOSIT_CONCURRENCY WITHDRAW_CONCURRENCY=$WITHDRAW_CONCURRENCY CLAIM_CONCURRENCY=$CLAIM_CONCURRENCY CLAIM_REWARDS_CONCURRENCY=$CLAIM_REWARDS_CONCURRENCY BATCH_DELAY_MS=$BATCH_DELAY_MS" + + if [[ "$SKIP_DEPOSITS" == "1" ]]; then + echo "SKIP_DEPOSITS=1 — skipping deposit loop" + else + run_deposits + fi + + log_pool_snapshot + print_contract_correctness_checks "post-deposits" + + if [[ "$WITHDRAW_USERS" != "0" ]]; then + run_withdraw_and_claim + print_contract_correctness_checks "post-claimWithdraw" + fi + + run_post_claimwithdraw_claim_rewards + 
print_contract_correctness_checks "final" + + RUN_END_TS="$(date +%s)" + if is_stress_mode_active; then + print_stress_summary + fi + + log_flow_section "Done" "All requested phases finished. Repo: $ROOT_DIR" +} + +main "$@" diff --git a/tests/integration/precompiles/communitypool/TEST_ASSUMPTIONS.md b/tests/integration/precompiles/communitypool/TEST_ASSUMPTIONS.md new file mode 100644 index 00000000..5d48f5b8 --- /dev/null +++ b/tests/integration/precompiles/communitypool/TEST_ASSUMPTIONS.md @@ -0,0 +1,62 @@ +# CommunityPool Integration Test Assumptions + +This document captures assumptions that the `communitypool` integration suite depends on for deterministic behavior. + +## Environment assumptions + +- The suite runs against the standard integration test network created by `network.NewUnitTestNetwork`. +- The chain has a valid staking bond denom and an ERC20 token pair for that denom. +- At least one active validator exists in the network validator set. + +## Contract + artifact assumptions + +- `contracts/solidity/pool/CommunityPool.json` matches the current `CommunityPool.sol` implementation. +- `contracts/community_pool.go` successfully loads that artifact via `LoadCommunityPool()`. + +## Test helper assumptions + +- Read-only contract checks use `QueryContract(...)` (not tx execution), so nonce state is not mutated by view calls. +- Successful tx helper (`execTxExpectSuccess`) sets a default gas limit when none is provided, to avoid estimator/limit edge cases in precompile-heavy paths (for example, `harvest`). +- Tests commit blocks (`network.NextBlock()`) between state-changing calls that require finalized state for subsequent reads/assertions. + +## Behavioral assumptions under test + +- Deposit/withdraw accounting uses floor rounding and must never over-mint shares. +- Owner-gated methods (`setConfig`, `syncTotalStaked`, `transferOwnership`) enforce access control. +- `stake()` and `harvest()` are restricted to `owner` or configured `automationCaller`. 
+- `reconcileTotalStaked` is restricted to `automationCaller` only (not `owner`). +- `syncTotalStaked` remains owner-only break-glass for bonded accounting sync. +- `principalAssets` is `stakeablePrincipalLedger + totalStaked`; `pricePerUnit` and deposit minting use that total. +- User `withdraw` sizes `amountOut` from **`totalStaked` only** (proportional to units burned). +- Conservative pre-audit policy: any `withdraw` requires all withdraw-relevant principal to be bonded. + If `stakeablePrincipalLedger > 0`, withdraw reverts. +- Full-exit safety rule: `withdraw(userUnits == totalUnits)` reverts with + `FullExitLeavesNonStakedPrincipal(uint256)` when `stakeablePrincipalLedger > 0`. +- Partial withdraw under non-bonded principal reverts with + `WithdrawRequiresAllPrincipalBonded(uint256)`. +- `stake()` delegates through `staking.delegateToBondedValidators(address(this), liquid, maxValidators)`. +- The staking precompile path is atomic at transaction scope: if any internal per-validator delegate fails, no partial delegation state persists. +- Validator selection policy for `stake()` is the first `maxValidators` bonded validators in staking precompile query order. +- Poolrebalancer target selection is independently the staking keeper bonded-by-power top-`max_target_validators` set. Exact ordering equivalence is not required; rebalance is the intended drift-correction path. +- Delegation split policy is deterministic: `amount / n` base per validator and `amount % n` remainder distributed as `+1` to the first remainder validators. +- `syncTotalStaked` is accounting-only and must not create staking side effects. It updates bonded `totalStaked` only. +- Withdraw maturity lifecycle is covered end-to-end: withdraw request creation, maturity advance, `claimWithdraw` payout, request claimed flag, and reserve invariants. 
+- The integration suite asserts `claimWithdraw` return values and state transitions; exact ERC20 wallet balance-delta equality is validated in Forge tests where token flows are fully deterministic.
+
+## Out-of-scope / covered elsewhere
+
+- Dust deposit (`ZeroMintedUnits`) and specific `FullExitLeavesNonStakedPrincipal` edge cases are covered in Forge tests under `contracts/test/pool/`.
+- Detailed staking-precompile internal atomicity and validator ordering semantics are covered in precompile and module-level tests; this integration suite validates contract behavior against chain wiring.
+
+## Poolrebalancer assumptions
+
+- Poolrebalancer EndBlock automation continues to run `harvest`/`stake` and bonded-only reconcile via `reconcileTotalStaked`.
+
+## Stability notes
+
+- Integration suites built on `network.NewUnitTestNetwork` (CommunityPool Ginkgo, poolrebalancer stub-EVM, etc.) need **`-tags=test`** (singular, not `tests`) so the `test`-tag build of `x/vm/types` provides `EVMConfigurator.ResetTestConfig`.
+- Redelegation queue maturity and cleanup semantics are covered by `x/poolrebalancer/keeper` unit tests and the poolrebalancer integration suite.
+
+- If staking precompile validator ordering or bonded-set query semantics change, tests should still hold as long as rebalancing converges stake into the keeper target set; update expectations only if the explicit policy above changes.
+- If default gas behavior changes in factory or precompiles, tx helper gas defaults may need adjustment.
+- If ownership/permissions policy changes, tests must be updated to reflect the new access model.
diff --git a/tests/integration/precompiles/communitypool/test_integration.go b/tests/integration/precompiles/communitypool/test_integration.go new file mode 100644 index 00000000..baf3c09f --- /dev/null +++ b/tests/integration/precompiles/communitypool/test_integration.go @@ -0,0 +1,280 @@ +package communitypool + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + + //nolint:revive + . "github.com/onsi/ginkgo/v2" + //nolint:revive + . "github.com/onsi/gomega" + + "github.com/cosmos/evm/testutil/integration/evm/network" +) + +func TestCommunityPoolIntegrationSuite(t *testing.T, create network.CreateEvmApp, options ...network.ConfigOption) { + _ = Describe("CommunityPool integration scaffold", func() { + var s *IntegrationTestSuite + + BeforeEach(func() { + s = NewIntegrationTestSuite(create, options...) + s.SetupTest() + }) + + It("sets up suite dependencies for CommunityPool tests", func() { + Expect(s.network).ToNot(BeNil()) + Expect(s.factory).ToNot(BeNil()) + Expect(s.grpcHandler).ToNot(BeNil()) + Expect(s.keyring).ToNot(BeNil()) + Expect(s.bondDenom).ToNot(BeEmpty()) + Expect(s.bondTokenAddr).ToNot(Equal([20]byte{})) + Expect(s.communityPoolContract.Bin).ToNot(BeEmpty()) + }) + + It("reverts withdraw when stakeable principal is non-zero", func() { + poolAddr := s.deployCommunityPool(0, 10, 5, big.NewInt(1)) + user := s.keyring.GetKey(1) + amount := big.NewInt(1000) + + s.approveBondToken(1, poolAddr, amount) + s.execTxExpectSuccess( + user.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "deposit", amount), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + s.execTxExpectCustomError( + user.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "withdraw", big.NewInt(1)), + "WithdrawRequiresAllPrincipalBonded(uint256)", + ) + }) + + It("reconcileTotalStaked updates principalAssets and pricePerUnit through totalStaked only", func() { + poolAddr := s.deployCommunityPool(0, 10, 5, 
big.NewInt(1)) + owner := s.keyring.GetKey(0) + user := s.keyring.GetKey(1) + automation := s.keyring.GetKey(2) + + amount := big.NewInt(1000) + s.approveBondToken(1, poolAddr, amount) + s.execTxExpectSuccess( + user.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "deposit", amount), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + assetsBefore := s.queryPoolUint(0, poolAddr, "principalAssets") + ppuBefore := s.queryPoolUint(0, poolAddr, "pricePerUnit") + Expect(assetsBefore.String()).To(Equal("1000")) + Expect(ppuBefore.String()).To(Equal("1000000000000000000")) + + s.execTxExpectSuccess( + owner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "setAutomationCaller", automation.Addr), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + s.execTxExpectSuccess( + automation.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "reconcileTotalStaked", big.NewInt(500)), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + totalStaked := s.queryPoolUint(0, poolAddr, "totalStaked") + assetsAfter := s.queryPoolUint(0, poolAddr, "principalAssets") + ppuAfter := s.queryPoolUint(0, poolAddr, "pricePerUnit") + Expect(totalStaked.String()).To(Equal("500")) + Expect(assetsAfter.String()).To(Equal("1500")) + Expect(ppuAfter.String()).To(Equal("1500000000000000000")) + }) + + It("owner syncTotalStaked remains available and owner-gated", func() { + poolAddr := s.deployCommunityPool(0, 10, 5, big.NewInt(1)) + owner := s.keyring.GetKey(0) + nonOwner := s.keyring.GetKey(1) + + s.execTxExpectCustomError( + nonOwner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "syncTotalStaked", big.NewInt(100)), + "Unauthorized()", + ) + + s.execTxExpectSuccess( + owner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "syncTotalStaked", big.NewInt(100)), + ) + Expect(s.network.NextBlock()).To(BeNil()) + Expect(s.queryPoolUint(0, poolAddr, "totalStaked").String()).To(Equal("100")) + }) + 
+ It("restricts reconcileTotalStaked to automation caller", func() { + poolAddr := s.deployCommunityPool(0, 10, 5, big.NewInt(1)) + owner := s.keyring.GetKey(0) + automation := s.keyring.GetKey(2) + + s.execTxExpectSuccess( + owner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "setAutomationCaller", automation.Addr), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + s.execTxExpectCustomError( + owner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "reconcileTotalStaked", big.NewInt(1)), + "Unauthorized()", + ) + + s.execTxExpectSuccess( + automation.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "reconcileTotalStaked", big.NewInt(321)), + ) + Expect(s.network.NextBlock()).To(BeNil()) + Expect(s.queryPoolUint(0, poolAddr, "totalStaked").String()).To(Equal("321")) + }) + + It("returns expected pricePerUnit for empty and adjusted pool", func() { + poolAddr := s.deployCommunityPool(0, 10, 5, big.NewInt(1)) + owner := s.keyring.GetKey(0) + user := s.keyring.GetKey(1) + + emptyPPU := s.queryPoolUint(0, poolAddr, "pricePerUnit") + Expect(emptyPPU.String()).To(Equal("1000000000000000000")) + + amount := big.NewInt(1000) + s.approveBondToken(1, poolAddr, amount) + s.execTxExpectSuccess( + user.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "deposit", amount), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + s.execTxExpectSuccess( + owner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "syncTotalStaked", big.NewInt(1000)), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + updatedPPU := s.queryPoolUint(0, poolAddr, "pricePerUnit") + Expect(updatedPPU.String()).To(Equal("2000000000000000000")) + }) + + It("runs two-user withdraw maturity lifecycle and claimWithdraw payout", func() { + poolAddr := s.deployCommunityPool(0, 10, 5, big.NewInt(1)) + owner := s.keyring.GetKey(0) + userA := s.keyring.GetKey(1) + userB := s.keyring.GetKey(2) + + amountA := 
big.NewInt(900) + amountB := big.NewInt(600) + s.approveBondToken(1, poolAddr, amountA) + s.approveBondToken(2, poolAddr, amountB) + s.execTxExpectSuccess(userA.Priv, buildTxArgs(poolAddr), buildCallArgs(s.communityPoolContract, "deposit", amountA)) + s.execTxExpectSuccess(userB.Priv, buildTxArgs(poolAddr), buildCallArgs(s.communityPoolContract, "deposit", amountB)) + Expect(s.network.NextBlock()).To(BeNil()) + + s.execTxExpectSuccess(owner.Priv, buildTxArgs(poolAddr), buildCallArgs(s.communityPoolContract, "stake")) + Expect(s.network.NextBlock()).To(BeNil()) + + userAUnits := s.queryPoolUint(0, poolAddr, "unitsOf", userA.Addr) + totalStakedBefore := s.queryPoolUint(0, poolAddr, "totalStaked") + totalUnitsBefore := s.queryPoolUint(0, poolAddr, "totalUnits") + expectedOut := new(big.Int).Mul(new(big.Int).Set(userAUnits), new(big.Int).Set(totalStakedBefore)) + expectedOut.Quo(expectedOut, totalUnitsBefore) + + withdrawRes := s.execTxAndGetEthResponse( + userA.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "withdraw", userAUnits), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + withdrawOut, err := s.communityPoolContract.ABI.Unpack("withdraw", withdrawRes.Ret) + Expect(err).To(BeNil(), "failed to unpack withdraw output") + Expect(withdrawOut).To(HaveLen(1)) + requestID, ok := withdrawOut[0].(*big.Int) + Expect(ok).To(BeTrue(), "unexpected withdraw return type") + + req := s.queryWithdrawRequest(poolAddr, requestID) + Expect(req.Owner).To(Equal(userA.Addr)) + Expect(req.AmountOut.String()).To(Equal(expectedOut.String())) + Expect(req.Claimed).To(BeFalse()) + + s.advanceToMaturity(req.Maturity) + + claimRes := s.execTxAndGetEthResponse( + userA.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "claimWithdraw", requestID), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + claimOut, err := s.communityPoolContract.ABI.Unpack("claimWithdraw", claimRes.Ret) + Expect(err).To(BeNil(), "failed to unpack claimWithdraw 
output") + Expect(claimOut).To(HaveLen(1)) + claimedAmount, ok := claimOut[0].(*big.Int) + Expect(ok).To(BeTrue(), "unexpected claimWithdraw return type") + Expect(claimedAmount.String()).To(Equal(expectedOut.String())) + + reqAfter := s.queryWithdrawRequest(poolAddr, requestID) + Expect(reqAfter.Claimed).To(BeTrue()) + + s.assertPoolInvariants(poolAddr) + }) + + It("keeps ownership transfer behavior unchanged", func() { + poolAddr := s.deployCommunityPool(0, 10, 5, big.NewInt(1)) + oldOwner := s.keyring.GetKey(0) + newOwner := s.keyring.GetKey(1) + + s.execTxExpectSuccess( + oldOwner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "transferOwnership", newOwner.Addr), + ) + Expect(s.network.NextBlock()).To(BeNil()) + + s.execTxExpectCustomError( + oldOwner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "setConfig", uint32(99), uint32(9), big.NewInt(3)), + "Unauthorized()", + ) + + s.execTxExpectSuccess( + newOwner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "setConfig", uint32(99), uint32(9), big.NewInt(3)), + ) + }) + + It("rejects transferOwnership to zero address", func() { + poolAddr := s.deployCommunityPool(0, 10, 5, big.NewInt(1)) + owner := s.keyring.GetKey(0) + zeroAddr := common.Address{} + s.execTxExpectCustomError( + owner.Priv, + buildTxArgs(poolAddr), + buildCallArgs(s.communityPoolContract, "transferOwnership", zeroAddr), + "InvalidAddress()", + ) + }) + }) + + RegisterFailHandler(Fail) + RunSpecs(t, "CommunityPool Integration Suite") +} diff --git a/tests/integration/precompiles/communitypool/test_setup.go b/tests/integration/precompiles/communitypool/test_setup.go new file mode 100644 index 00000000..4edb508e --- /dev/null +++ b/tests/integration/precompiles/communitypool/test_setup.go @@ -0,0 +1,85 @@ +package communitypool + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/suite" + . 
"github.com/onsi/gomega" + + compiledcontracts "github.com/cosmos/evm/contracts" + "github.com/cosmos/evm/precompiles/erc20" + "github.com/cosmos/evm/testutil/integration/evm/factory" + "github.com/cosmos/evm/testutil/integration/evm/grpc" + "github.com/cosmos/evm/testutil/integration/evm/network" + "github.com/cosmos/evm/testutil/integration/evm/utils" + testkeyring "github.com/cosmos/evm/testutil/keyring" + evmtypes "github.com/cosmos/evm/x/vm/types" +) + +// IntegrationTestSuite contains shared setup/state for CommunityPool integration tests. +type IntegrationTestSuite struct { + suite.Suite + + create network.CreateEvmApp + options []network.ConfigOption + + network *network.UnitTestNetwork + factory factory.TxFactory + grpcHandler grpc.Handler + keyring testkeyring.Keyring + + bondDenom string + bondTokenAddr common.Address + bondTokenPC *erc20.Precompile + + communityPoolContract evmtypes.CompiledContract +} + +func NewIntegrationTestSuite(create network.CreateEvmApp, options ...network.ConfigOption) *IntegrationTestSuite { + return &IntegrationTestSuite{ + create: create, + options: options, + } +} + +func (s *IntegrationTestSuite) SetupTest() { + keys := testkeyring.New(3) + genesis := utils.CreateGenesisWithTokenPairs(keys) + + opts := []network.ConfigOption{ + network.WithPreFundedAccounts(keys.GetAllAccAddrs()...), + network.WithCustomGenesis(genesis), + } + opts = append(opts, s.options...) + + nw := network.NewUnitTestNetwork(s.create, opts...) 
+ gh := grpc.NewIntegrationHandler(nw) + tf := factory.New(nw, gh) + + ctx := nw.GetContext() + sk := nw.App.GetStakingKeeper() + bondDenom, err := sk.BondDenom(ctx) + Expect(err).To(BeNil(), "failed to get bond denom") + Expect(bondDenom).ToNot(BeEmpty(), "bond denom cannot be empty") + + tokenPairID := nw.App.GetErc20Keeper().GetTokenPairID(ctx, bondDenom) + tokenPair, found := nw.App.GetErc20Keeper().GetTokenPair(ctx, tokenPairID) + Expect(found).To(BeTrue(), "failed to find token pair for bond denom") + bondTokenPC := erc20.NewPrecompile( + tokenPair, + nw.App.GetBankKeeper(), + nw.App.GetErc20Keeper(), + nw.App.GetTransferKeeper(), + ) + + poolContract, err := compiledcontracts.LoadCommunityPool() + Expect(err).To(BeNil(), "failed to load CommunityPool compiled contract") + + s.network = nw + s.factory = tf + s.grpcHandler = gh + s.keyring = keys + s.bondDenom = bondDenom + s.bondTokenAddr = tokenPair.GetERC20Contract() + s.bondTokenPC = bondTokenPC + s.communityPoolContract = poolContract +} diff --git a/tests/integration/precompiles/communitypool/test_utils.go b/tests/integration/precompiles/communitypool/test_utils.go new file mode 100644 index 00000000..6d873183 --- /dev/null +++ b/tests/integration/precompiles/communitypool/test_utils.go @@ -0,0 +1,300 @@ +package communitypool + +import ( + "bytes" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + . 
"github.com/onsi/gomega" + + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/evm/precompiles/erc20" + testutiltypes "github.com/cosmos/evm/testutil/types" + evmtypes "github.com/cosmos/evm/x/vm/types" +) + +type withdrawRequestView struct { + Owner common.Address + AmountOut *big.Int + Maturity uint64 + ReserveMoved bool + Claimed bool +} + +// deployCommunityPool deploys CommunityPool with deterministic defaults used in tests. +func (s *IntegrationTestSuite) deployCommunityPool( + ownerIdx int, + maxRetrieve uint32, + maxValidators uint32, + minStakeAmount *big.Int, +) common.Address { + owner := s.keyring.GetKey(ownerIdx) + addr, err := s.factory.DeployContract( + owner.Priv, + evmtypes.EvmTxArgs{}, + testutiltypes.ContractDeploymentData{ + Contract: s.communityPoolContract, + ConstructorArgs: []interface{}{ + s.bondTokenAddr, + maxRetrieve, + maxValidators, + minStakeAmount, + owner.Addr, + }, + }, + ) + Expect(err).To(BeNil(), "failed to deploy CommunityPool") + Expect(s.network.NextBlock()).To(BeNil(), "failed to commit deployment block") + return addr +} + +func buildCallArgs(contract evmtypes.CompiledContract, method string, args ...interface{}) testutiltypes.CallArgs { + return testutiltypes.CallArgs{ + ContractABI: contract.ABI, + MethodName: method, + Args: args, + } +} + +func buildTxArgs(contractAddr common.Address) evmtypes.EvmTxArgs { + return evmtypes.EvmTxArgs{ + To: &contractAddr, + } +} + +func (s *IntegrationTestSuite) approveBondToken( + ownerIdx int, + spender common.Address, + amount *big.Int, +) { + owner := s.keyring.GetKey(ownerIdx) + txArgs := buildTxArgs(s.bondTokenAddr) + callArgs := testutiltypes.CallArgs{ + ContractABI: s.bondTokenPC.ABI, + MethodName: erc20.ApproveMethod, + Args: []interface{}{spender, amount}, + } + + s.execTxExpectSuccess(owner.Priv, txArgs, callArgs) + Expect(s.network.NextBlock()).To(BeNil(), "failed to commit approve tx") +} + +func (s *IntegrationTestSuite) queryPoolUint( + callerIdx 
int, + contractAddr common.Address, + method string, + args ...interface{}, +) *big.Int { + _ = callerIdx + txArgs := buildTxArgs(contractAddr) + callArgs := buildCallArgs(s.communityPoolContract, method, args...) + + ethRes, err := s.factory.QueryContract(txArgs, callArgs, 0) + Expect(err).To(BeNil(), "query call failed") + + out, err := s.communityPoolContract.ABI.Unpack(method, ethRes.Ret) + Expect(err).To(BeNil(), "failed to unpack query output") + Expect(out).ToNot(BeEmpty(), "empty query output") + + switch value := out[0].(type) { + case *big.Int: + return value + case uint8: + return new(big.Int).SetUint64(uint64(value)) + case uint16: + return new(big.Int).SetUint64(uint64(value)) + case uint32: + return new(big.Int).SetUint64(uint64(value)) + case uint64: + return new(big.Int).SetUint64(value) + default: + Expect(false).To(BeTrue(), "unexpected query output type") + return nil + } +} + +func (s *IntegrationTestSuite) queryPoolAddress( + contractAddr common.Address, + method string, + args ...interface{}, +) common.Address { + txArgs := buildTxArgs(contractAddr) + callArgs := buildCallArgs(s.communityPoolContract, method, args...) 
+ + ethRes, err := s.factory.QueryContract(txArgs, callArgs, 0) + Expect(err).To(BeNil(), "query call failed") + + out, err := s.communityPoolContract.ABI.Unpack(method, ethRes.Ret) + Expect(err).To(BeNil(), "failed to unpack query output") + Expect(out).To(HaveLen(1), "unexpected query output length") + + addr, ok := out[0].(common.Address) + Expect(ok).To(BeTrue(), "unexpected query output type") + return addr +} + +func (s *IntegrationTestSuite) execTxExpectSuccess( + priv cryptotypes.PrivKey, + txArgs evmtypes.EvmTxArgs, + callArgs testutiltypes.CallArgs, +) { + ethRes := s.execTxAndGetEthResponse(priv, txArgs, callArgs) + Expect(ethRes.VmError).To(BeEmpty(), "unexpected EVM execution revert") +} + +func (s *IntegrationTestSuite) execTxAndGetEthResponse( + priv cryptotypes.PrivKey, + txArgs evmtypes.EvmTxArgs, + callArgs testutiltypes.CallArgs, +) *evmtypes.MsgEthereumTxResponse { + if txArgs.GasLimit == 0 { + txArgs.GasLimit = 2_000_000 + } + res, err := s.factory.ExecuteContractCall(priv, txArgs, callArgs) + Expect(err).To(BeNil(), "expected tx execution success") + + ethRes, err := evmtypes.DecodeTxResponse(res.Data) + Expect(err).To(BeNil(), "failed to decode ethereum tx response") + return ethRes +} + +func (s *IntegrationTestSuite) findEventLog( + ethRes *evmtypes.MsgEthereumTxResponse, + emitter common.Address, + event abi.Event, +) *evmtypes.Log { + for _, lg := range ethRes.Logs { + if !strings.EqualFold(lg.Address, emitter.Hex()) { + continue + } + if len(lg.Topics) == 0 { + continue + } + if strings.EqualFold(lg.Topics[0], event.ID.Hex()) { + return lg + } + } + return nil +} + +func (s *IntegrationTestSuite) execTxExpectCustomError( + priv cryptotypes.PrivKey, + txArgs evmtypes.EvmTxArgs, + callArgs testutiltypes.CallArgs, + errorSignature string, +) { + if txArgs.GasLimit == 0 { + txArgs.GasLimit = 2_000_000 + } + res, err := s.factory.ExecuteContractCall(priv, txArgs, callArgs) + Expect(err).To(BeNil(), "expected tx execution to return response 
for revert checks") + + ethRes, err := evmtypes.DecodeTxResponse(res.Data) + Expect(err).To(BeNil(), "failed to decode ethereum tx response") + Expect(ethRes.VmError).To(ContainSubstring(vm.ErrExecutionReverted.Error())) + Expect(len(ethRes.Ret)).To(BeNumerically(">=", 4), "revert payload too short for custom error selector") + + expectedSelector := crypto.Keccak256([]byte(errorSignature))[:4] + Expect(bytes.Equal(ethRes.Ret[:4], expectedSelector)). + To(BeTrue(), "expected custom error %s (selector %x), got selector %x", errorSignature, expectedSelector, ethRes.Ret[:4]) +} + +func (s *IntegrationTestSuite) queryBondTokenBalance(addr common.Address) *big.Int { + ethRes, err := s.factory.QueryContract( + buildTxArgs(s.bondTokenAddr), + testutiltypes.CallArgs{ + ContractABI: s.bondTokenPC.ABI, + MethodName: erc20.BalanceOfMethod, + Args: []interface{}{addr}, + }, + 0, + ) + Expect(err).To(BeNil(), "failed querying bond token balance") + + out, err := s.bondTokenPC.ABI.Unpack(erc20.BalanceOfMethod, ethRes.Ret) + Expect(err).To(BeNil(), "failed to unpack bond token balance") + Expect(out).To(HaveLen(1)) + bal, ok := out[0].(*big.Int) + Expect(ok).To(BeTrue(), "unexpected balance output type") + return bal +} + +func (s *IntegrationTestSuite) queryWithdrawRequest(contractAddr common.Address, requestID *big.Int) withdrawRequestView { + ethRes, err := s.factory.QueryContract( + buildTxArgs(contractAddr), + buildCallArgs(s.communityPoolContract, "withdrawRequests", requestID), + 0, + ) + Expect(err).To(BeNil(), "failed querying withdraw request") + + out, err := s.communityPoolContract.ABI.Unpack("withdrawRequests", ethRes.Ret) + Expect(err).To(BeNil(), "failed to unpack withdraw request") + Expect(out).To(HaveLen(5)) + + owner, ok := out[0].(common.Address) + Expect(ok).To(BeTrue(), "unexpected owner type") + amountOut, ok := out[1].(*big.Int) + Expect(ok).To(BeTrue(), "unexpected amountOut type") + + var maturity uint64 + switch t := out[2].(type) { + case uint64: + 
maturity = t + case *big.Int: + maturity = t.Uint64() + default: + Expect(false).To(BeTrue(), "unexpected maturity type") + } + + reserveMoved, ok := out[3].(bool) + Expect(ok).To(BeTrue(), "unexpected reserveMoved type") + claimed, ok := out[4].(bool) + Expect(ok).To(BeTrue(), "unexpected claimed type") + + return withdrawRequestView{ + Owner: owner, + AmountOut: amountOut, + Maturity: maturity, + ReserveMoved: reserveMoved, + Claimed: claimed, + } +} + +func (s *IntegrationTestSuite) advanceToMaturity(maturity uint64) { + now := uint64(s.network.GetContext().BlockTime().Unix()) + if maturity <= now { + return + } + delta := time.Duration(maturity-now+1) * time.Second + Expect(s.network.NextBlockAfter(delta)).To(BeNil(), "failed to advance block time to maturity") +} + +func (s *IntegrationTestSuite) assertPoolInvariants(poolAddr common.Address) { + liquid := s.queryPoolUint(0, poolAddr, "liquidBalance") + rewardReserve := s.queryPoolUint(0, poolAddr, "rewardReserve") + maturedReserve := s.queryPoolUint(0, poolAddr, "maturedWithdrawReserve") + pendingReserve := s.queryPoolUint(0, poolAddr, "pendingWithdrawReserve") + ledger := s.queryPoolUint(0, poolAddr, "stakeablePrincipalLedger") + commitments := s.queryPoolUint(0, poolAddr, "totalWithdrawCommitments") + + // rewardReserve <= liquidBalance + Expect(rewardReserve.Cmp(liquid)).To(BeNumerically("<=", 0)) + + // rewardReserve + maturedWithdrawReserve <= liquidBalance + reserved := new(big.Int).Add(new(big.Int).Set(rewardReserve), maturedReserve) + Expect(reserved.Cmp(liquid)).To(BeNumerically("<=", 0)) + + // stakeablePrincipalLedger + rewardReserve + maturedWithdrawReserve <= liquidBalance + accounted := new(big.Int).Add(new(big.Int).Set(ledger), rewardReserve) + accounted.Add(accounted, maturedReserve) + Expect(accounted.Cmp(liquid)).To(BeNumerically("<=", 0)) + + // totalWithdrawCommitments == pendingWithdrawReserve + maturedWithdrawReserve + expectedCommitments := 
new(big.Int).Add(new(big.Int).Set(pendingReserve), maturedReserve) + Expect(commitments.String()).To(Equal(expectedCommitments.String())) +} diff --git a/tests/integration/precompiles/staking/test_integration.go b/tests/integration/precompiles/staking/test_integration.go index 085a8852..e82b5afe 100644 --- a/tests/integration/precompiles/staking/test_integration.go +++ b/tests/integration/precompiles/staking/test_integration.go @@ -546,6 +546,480 @@ func TestPrecompileIntegrationTestSuite(t *testing.T, create network.CreateEvmAp }) }) + Describe("to delegate equally to bonded validators", func() { + BeforeEach(func() { + callArgs.MethodName = staking.DelegateToBondedValidatorsMethod + }) + + It("should delegate with equal split and deterministic remainder", func() { + newAddr, newAddrPriv := testutiltx.NewAccAddressAndKey() + err := utils.FundAccountWithBaseDenom(s.factory, s.network, s.keyring.GetKey(0), newAddr, math.NewInt(2e18)) + Expect(err).To(BeNil(), "error while funding account") + Expect(s.network.NextBlock()).To(BeNil()) + + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + big.NewInt(5), + uint32(2), + } + logCheckArgs := passCheck.WithExpEvents(staking.EventTypeDelegate, staking.EventTypeDelegate) + + _, ethRes, err := s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + logCheckArgs, + ) + Expect(err).To(BeNil(), "error while calling the smart contract: %v", err) + Expect(s.network.NextBlock()).To(BeNil()) + + unpacked, err := s.precompile.ABI.Unpack(staking.DelegateToBondedValidatorsMethod, ethRes.Ret) + Expect(err).To(BeNil(), "error while unpacking tx output") + Expect(unpacked).To(HaveLen(2)) + + delegatedAmount, ok := unpacked[0].(*big.Int) + Expect(ok).To(BeTrue(), "expected delegatedAmount to be *big.Int") + validatorsUsed, ok := unpacked[1].(uint32) + Expect(ok).To(BeTrue(), "expected validatorsUsed to be uint32") + Expect(delegatedAmount).To(Equal(big.NewInt(5))) + 
Expect(validatorsUsed).To(Equal(uint32(2))) + + qc := s.network.GetStakingClient() + valsRes, err := qc.Validators(s.network.GetContext(), &stakingtypes.QueryValidatorsRequest{ + Status: stakingtypes.BondStatusBonded, + Pagination: &query.PageRequest{ + Limit: 2, + }, + }) + Expect(err).To(BeNil()) + Expect(valsRes.Validators).To(HaveLen(2)) + + // deterministic remainder policy: for amount=5 and n=2 => [3,2] + exp := []int64{3, 2} + for i, v := range valsRes.Validators { + delRes, delErr := s.grpcHandler.GetDelegation(newAddr.String(), v.OperatorAddress) + Expect(delErr).To(BeNil(), "expected delegation for validator %s", v.OperatorAddress) + Expect(delRes.DelegationResponse.Balance.Amount.Int64()).To(Equal(exp[i])) + } + }) + + It("should honor maxValidators cap", func() { + newAddr, newAddrPriv := testutiltx.NewAccAddressAndKey() + err := utils.FundAccountWithBaseDenom(s.factory, s.network, s.keyring.GetKey(0), newAddr, math.NewInt(2e18)) + Expect(err).To(BeNil(), "error while funding account") + Expect(s.network.NextBlock()).To(BeNil()) + + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + big.NewInt(9), + uint32(1), + } + logCheckArgs := passCheck.WithExpEvents(staking.EventTypeDelegate) + + _, ethRes, err := s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + logCheckArgs, + ) + Expect(err).To(BeNil(), "error while calling the smart contract: %v", err) + Expect(s.network.NextBlock()).To(BeNil()) + + unpacked, err := s.precompile.ABI.Unpack(staking.DelegateToBondedValidatorsMethod, ethRes.Ret) + Expect(err).To(BeNil(), "error while unpacking tx output") + delegatedAmount, ok := unpacked[0].(*big.Int) + Expect(ok).To(BeTrue()) + validatorsUsed, ok := unpacked[1].(uint32) + Expect(ok).To(BeTrue()) + Expect(delegatedAmount).To(Equal(big.NewInt(9))) + Expect(validatorsUsed).To(Equal(uint32(1))) + }) + + It("should use fewer validators when amount is smaller than maxValidators", func() { + newAddr, newAddrPriv := 
testutiltx.NewAccAddressAndKey() + err := utils.FundAccountWithBaseDenom(s.factory, s.network, s.keyring.GetKey(0), newAddr, math.NewInt(2e18)) + Expect(err).To(BeNil(), "error while funding account") + Expect(s.network.NextBlock()).To(BeNil()) + + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + big.NewInt(1), + uint32(2), + } + logCheckArgs := passCheck.WithExpEvents(staking.EventTypeDelegate) + + _, ethRes, err := s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + logCheckArgs, + ) + Expect(err).To(BeNil(), "error while calling the smart contract: %v", err) + Expect(s.network.NextBlock()).To(BeNil()) + + unpacked, err := s.precompile.ABI.Unpack(staking.DelegateToBondedValidatorsMethod, ethRes.Ret) + Expect(err).To(BeNil(), "error while unpacking tx output") + delegatedAmount, ok := unpacked[0].(*big.Int) + Expect(ok).To(BeTrue()) + validatorsUsed, ok := unpacked[1].(uint32) + Expect(ok).To(BeTrue()) + Expect(delegatedAmount).To(Equal(big.NewInt(1))) + Expect(validatorsUsed).To(Equal(uint32(1))) + }) + + It("should fail when caller is different from delegator address", func() { + delegator := s.keyring.GetKey(0) + differentAddr := testutiltx.GenerateAddress() + + callArgs.Args = []interface{}{ + differentAddr, + big.NewInt(10), + uint32(2), + } + logCheckArgs := defaultLogCheck.WithErrContains( + fmt.Sprintf(cmn.ErrRequesterIsNotMsgSender, delegator.Addr, differentAddr), + ) + + _, _, err := s.factory.CallContractAndCheckLogs( + delegator.Priv, + txArgs, + callArgs, + logCheckArgs, + ) + Expect(err).To(BeNil(), "error while calling the smart contract: %v", err) + }) + + It("should fail when maxValidators is zero", func() { + delegator := s.keyring.GetKey(0) + + callArgs.Args = []interface{}{ + delegator.Addr, + big.NewInt(10), + uint32(0), + } + logCheckArgs := defaultLogCheck.WithErrContains("maxValidators must be greater than zero") + + _, _, err := s.factory.CallContractAndCheckLogs( + delegator.Priv, + txArgs, + 
callArgs, + logCheckArgs, + ) + Expect(err).To(BeNil(), "error while calling the smart contract: %v", err) + }) + + It("should fail when account balance is insufficient", func() { + newAddr, newAddrPriv := testutiltx.NewAccAddressAndKey() + err := utils.FundAccountWithBaseDenom(s.factory, s.network, s.keyring.GetKey(0), newAddr, math.NewInt(1)) + Expect(err).To(BeNil(), "error while funding account") + Expect(s.network.NextBlock()).To(BeNil()) + + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + big.NewInt(2), + uint32(2), + } + logCheckArgs := defaultLogCheck.WithErrContains("insufficient funds") + + _, _, err = s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + logCheckArgs, + ) + Expect(err).To(BeNil(), "error while calling the smart contract: %v", err) + + // Atomicity assertion: failed call must not persist any delegation state. + qc := s.network.GetStakingClient() + valsRes, qErr := qc.Validators(s.network.GetContext(), &stakingtypes.QueryValidatorsRequest{ + Status: stakingtypes.BondStatusBonded, + Pagination: &query.PageRequest{ + Limit: 2, + }, + }) + Expect(qErr).To(BeNil()) + Expect(valsRes.Validators).To(HaveLen(2)) + for _, v := range valsRes.Validators { + _, delErr := s.grpcHandler.GetDelegation(newAddr.String(), v.OperatorAddress) + Expect(delErr).ToNot(BeNil(), "expected no delegation persisted for validator %s", v.OperatorAddress) + } + }) + }) + + Describe("to undelegate across bonded validators", func() { + var ( + setupDelegations = func() (sdk.AccAddress, *ethsecp256k1.PrivKey) { + newAddr, newAddrPriv := testutiltx.NewAccAddressAndKey() + err := utils.FundAccountWithBaseDenom(s.factory, s.network, s.keyring.GetKey(0), newAddr, math.NewInt(4e18)) + Expect(err).To(BeNil(), "error while funding account") + Expect(s.network.NextBlock()).To(BeNil()) + + callArgs.MethodName = staking.DelegateMethod + callArgs.Args = []interface{}{common.BytesToAddress(newAddr), valAddr.String(), big.NewInt(2e18)} + _, _, 
err = s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + passCheck.WithExpEvents(staking.EventTypeDelegate), + ) + Expect(err).To(BeNil(), "error while delegating to first validator") + Expect(s.network.NextBlock()).To(BeNil()) + + callArgs.Args = []interface{}{common.BytesToAddress(newAddr), valAddr2.String(), big.NewInt(1e18)} + _, _, err = s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + passCheck.WithExpEvents(staking.EventTypeDelegate), + ) + Expect(err).To(BeNil(), "error while delegating to second validator") + Expect(s.network.NextBlock()).To(BeNil()) + + return newAddr, newAddrPriv + } + amountForValidator = func(res *stakingtypes.QueryDelegatorDelegationsResponse, validatorAddr string) *big.Int { + for _, dr := range res.DelegationResponses { + if dr.Delegation.ValidatorAddress == validatorAddr { + return dr.Balance.Amount.BigInt() + } + } + return big.NewInt(0) + } + ) + + BeforeEach(func() { + callArgs.MethodName = staking.UndelegateFromBondedValidatorsMethod + }) + + It("should undelegate largest-first and return tuple", func() { + newAddr, newAddrPriv := setupDelegations() + prevGasLimit := txArgs.GasLimit + txArgs.GasLimit = 5_000_000 + defer func() { txArgs.GasLimit = prevGasLimit }() + + requested := big.NewInt(2500000000000000000) // 2.5e18 + callArgs.MethodName = staking.UndelegateFromBondedValidatorsMethod + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + requested, + uint32(2), + } + + _, ethRes, err := s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + passCheck.WithExpEvents(staking.EventTypeUnbond, staking.EventTypeUnbond), + ) + Expect(err).To(BeNil(), "error while calling undelegateFromBondedValidators") + Expect(s.network.NextBlock()).To(BeNil()) + + unpacked, err := s.precompile.ABI.Unpack(staking.UndelegateFromBondedValidatorsMethod, ethRes.Ret) + Expect(err).To(BeNil(), "error while unpacking tx output") + Expect(unpacked).To(HaveLen(3)) + + 
undelegatedAmount, ok := unpacked[0].(*big.Int) + Expect(ok).To(BeTrue(), "expected undelegatedAmount to be *big.Int") + validatorsUsed, ok := unpacked[1].(uint32) + Expect(ok).To(BeTrue(), "expected validatorsUsed to be uint32") + maturityTime, ok := unpacked[2].(int64) + Expect(ok).To(BeTrue(), "expected maturityTime to be int64") + Expect(undelegatedAmount).To(Equal(requested)) + Expect(validatorsUsed).To(Equal(uint32(2))) + Expect(maturityTime).To(BeNumerically(">", 0)) + + res, err := s.grpcHandler.GetDelegatorDelegations(newAddr.String()) + Expect(err).To(BeNil(), "failed querying delegator delegations") + + // Largest-first expectation with initial balances [2e18, 1e18] and request 2.5e18: + // first validator drained to 0, second reduced to 0.5e18. + Expect(amountForValidator(res, valAddr.String())).To(Equal(big.NewInt(0))) + Expect(amountForValidator(res, valAddr2.String())).To(Equal(big.NewInt(500000000000000000))) + + // Invariant: precompile returns the maximum completion time across all + // undelegation steps, so pool callers can wait for a single maturity timestamp. 
+ ubdRes, ubdErr := s.grpcHandler.GetDelegatorUnbondingDelegations(newAddr.String()) + Expect(ubdErr).To(BeNil(), "failed querying unbonding delegations") + maxCompletion := int64(0) + for _, ubd := range ubdRes.UnbondingResponses { + for _, entry := range ubd.Entries { + completion := entry.CompletionTime.UTC().Unix() + if completion > maxCompletion { + maxCompletion = completion + } + } + } + Expect(maxCompletion).To(BeNumerically(">", 0)) + Expect(maturityTime).To(Equal(maxCompletion)) + }) + + It("should break ties by validator address ascending", func() { + newAddr, newAddrPriv := testutiltx.NewAccAddressAndKey() + err := utils.FundAccountWithBaseDenom(s.factory, s.network, s.keyring.GetKey(0), newAddr, math.NewInt(3e18)) + Expect(err).To(BeNil(), "error while funding account") + Expect(s.network.NextBlock()).To(BeNil()) + + // Equal delegations to both validators so address order decides selection. + callArgs.MethodName = staking.DelegateMethod + callArgs.Args = []interface{}{common.BytesToAddress(newAddr), valAddr.String(), big.NewInt(1e18)} + _, _, err = s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + passCheck.WithExpEvents(staking.EventTypeDelegate), + ) + Expect(err).To(BeNil(), "error while delegating to first validator") + Expect(s.network.NextBlock()).To(BeNil()) + + callArgs.Args = []interface{}{common.BytesToAddress(newAddr), valAddr2.String(), big.NewInt(1e18)} + _, _, err = s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + passCheck.WithExpEvents(staking.EventTypeDelegate), + ) + Expect(err).To(BeNil(), "error while delegating to second validator") + Expect(s.network.NextBlock()).To(BeNil()) + + prevGasLimit := txArgs.GasLimit + txArgs.GasLimit = 5_000_000 + defer func() { txArgs.GasLimit = prevGasLimit }() + + callArgs.MethodName = staking.UndelegateFromBondedValidatorsMethod + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + big.NewInt(1500000000000000000), // 1.5e18 + 
uint32(2), + } + _, _, err = s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + passCheck.WithExpEvents(staking.EventTypeUnbond, staking.EventTypeUnbond), + ) + Expect(err).To(BeNil(), "error while calling tie-break undelegation") + Expect(s.network.NextBlock()).To(BeNil()) + + first := valAddr.String() + second := valAddr2.String() + if second < first { + first, second = second, first + } + + res, qErr := s.grpcHandler.GetDelegatorDelegations(newAddr.String()) + Expect(qErr).To(BeNil(), "failed querying delegator delegations") + + // Tie-break expectation: + // - lexicographically first validator is drained first (1e18 -> 0) + // - second validator provides the remainder (1e18 -> 0.5e18) + Expect(amountForValidator(res, first)).To(Equal(big.NewInt(0))) + Expect(amountForValidator(res, second)).To(Equal(big.NewInt(500000000000000000))) + }) + + It("should revert when maxValidators cap prevents exact undelegation", func() { + newAddr, newAddrPriv := setupDelegations() + prevGasLimit := txArgs.GasLimit + txArgs.GasLimit = 5_000_000 + defer func() { txArgs.GasLimit = prevGasLimit }() + + callArgs.MethodName = staking.UndelegateFromBondedValidatorsMethod + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + big.NewInt(2500000000000000000), // 2.5e18 + uint32(1), // cap prevents full amount + } + + _, _, err := s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + defaultLogCheck.WithErrContains("insufficient bonded delegations to undelegate requested amount"), + ) + Expect(err).To(BeNil(), "error while checking capped undelegation revert") + Expect(s.network.NextBlock()).To(BeNil()) + + // Atomicity: state remains unchanged on revert. 
+ res, qErr := s.grpcHandler.GetDelegatorDelegations(newAddr.String()) + Expect(qErr).To(BeNil()) + Expect(amountForValidator(res, valAddr.String())).To(Equal(big.NewInt(2e18))) + Expect(amountForValidator(res, valAddr2.String())).To(Equal(big.NewInt(1e18))) + }) + + It("should revert when caller is different from delegator address", func() { + newAddr, _ := setupDelegations() + delegator := s.keyring.GetKey(0) + prevGasLimit := txArgs.GasLimit + txArgs.GasLimit = 5_000_000 + defer func() { txArgs.GasLimit = prevGasLimit }() + + callArgs.MethodName = staking.UndelegateFromBondedValidatorsMethod + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + big.NewInt(1e18), + uint32(2), + } + + _, _, err := s.factory.CallContractAndCheckLogs( + delegator.Priv, + txArgs, + callArgs, + defaultLogCheck.WithErrContains( + fmt.Sprintf(cmn.ErrRequesterIsNotMsgSender, delegator.Addr, common.BytesToAddress(newAddr).String()), + ), + ) + Expect(err).To(BeNil(), "error while checking requester mismatch revert") + }) + + It("should revert when maxValidators is zero", func() { + newAddr, newAddrPriv := setupDelegations() + prevGasLimit := txArgs.GasLimit + txArgs.GasLimit = 5_000_000 + defer func() { txArgs.GasLimit = prevGasLimit }() + + callArgs.MethodName = staking.UndelegateFromBondedValidatorsMethod + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + big.NewInt(1e18), + uint32(0), + } + + _, _, err := s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + defaultLogCheck.WithErrContains("maxValidators must be greater than zero"), + ) + Expect(err).To(BeNil(), "error while checking maxValidators validation") + }) + + It("should revert when no bonded delegations exist", func() { + newAddr, newAddrPriv := testutiltx.NewAccAddressAndKey() + err := utils.FundAccountWithBaseDenom(s.factory, s.network, s.keyring.GetKey(0), newAddr, math.NewInt(1e18)) + Expect(err).To(BeNil(), "error while funding account") + 
Expect(s.network.NextBlock()).To(BeNil()) + + prevGasLimit := txArgs.GasLimit + txArgs.GasLimit = 5_000_000 + defer func() { txArgs.GasLimit = prevGasLimit }() + + callArgs.MethodName = staking.UndelegateFromBondedValidatorsMethod + callArgs.Args = []interface{}{ + common.BytesToAddress(newAddr), + big.NewInt(1), + uint32(2), + } + + _, _, err = s.factory.CallContractAndCheckLogs( + newAddrPriv, + txArgs, + callArgs, + defaultLogCheck.WithErrContains("no bonded delegations found"), + ) + Expect(err).To(BeNil(), "error while checking no bonded delegations revert") + }) + + }) + Describe("to redelegate", func() { BeforeEach(func() { callArgs.MethodName = staking.RedelegateMethod diff --git a/tests/integration/precompiles/staking/test_staking.go b/tests/integration/precompiles/staking/test_staking.go index 7bfe0490..21ebf76b 100644 --- a/tests/integration/precompiles/staking/test_staking.go +++ b/tests/integration/precompiles/staking/test_staking.go @@ -381,7 +381,7 @@ func (s *PrecompileTestSuite) TestRun() { s.Require().NoError(err, "failed to pack input") return input }, - 21559, // use enough gas to avoid out of gas error + 21787, // tuned to avoid out of gas while preserving gas-consumption assertion true, false, "write protection", @@ -391,7 +391,7 @@ func (s *PrecompileTestSuite) TestRun() { func(_ keyring.Key) []byte { return []byte("invalid") }, - 21559, // use enough gas to avoid out of gas error + 21787, // tuned to avoid out of gas while preserving gas-consumption assertion false, false, "no method with id", diff --git a/tests/integration/x/poolrebalancer/stub_evm_keeper.go b/tests/integration/x/poolrebalancer/stub_evm_keeper.go new file mode 100644 index 00000000..a3c4213f --- /dev/null +++ b/tests/integration/x/poolrebalancer/stub_evm_keeper.go @@ -0,0 +1,37 @@ +package poolrebalancer + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + + poolrebalancertypes 
"github.com/cosmos/evm/x/poolrebalancer/types" + evmtypes "github.com/cosmos/evm/x/vm/types" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// rebalanceIntegrationStubEVM implements poolrebalancertypes.EVMKeeper for this package only. +// Used with a nil account keeper in test_suite so a prefunded keyring delegator can be the pool +// address without failing the user-pubkey check. These tests target rebalance scheduling, queues, +// and staking—not CommunityPool calldata or real VM execution (see precompiles/communitypool). +type rebalanceIntegrationStubEVM struct{} + +func (rebalanceIntegrationStubEVM) CallEVM( + _ sdk.Context, + _ abi.ABI, + _, _ common.Address, + _ bool, + _ *big.Int, + _ string, + _ ...any, +) (*evmtypes.MsgEthereumTxResponse, error) { + return &evmtypes.MsgEthereumTxResponse{}, nil +} + +func (rebalanceIntegrationStubEVM) IsContract(sdk.Context, common.Address) bool { + return true +} + +var _ poolrebalancertypes.EVMKeeper = rebalanceIntegrationStubEVM{} diff --git a/tests/integration/x/poolrebalancer/test_case_a_scheduling.go b/tests/integration/x/poolrebalancer/test_case_a_scheduling.go new file mode 100644 index 00000000..c476168b --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_case_a_scheduling.go @@ -0,0 +1,77 @@ +package poolrebalancer + +import ( + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/evm/testutil/integration/evm/utils" + poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types" +) + +// TestSchedulingA_DriftCreatesPendingRedelegations verifies that measurable drift +// produces at least one pending redelegation for the pool delegator. +func (s *KeeperIntegrationTestSuite) TestSchedulingA_DriftCreatesPendingRedelegations() { + // Any drift should schedule with bp=0. 
+ params := s.DefaultEnabledParams( + 0, // rebalance_threshold_bp + 1, // max_ops_per_block + sdkmath.ZeroInt(), + ) + s.EnableRebalancer(params) + + src := s.validators[0] + s.DelegateExtraToValidator(src) + s.T().Logf("scheduling-case: drift pushed to %s", src.OperatorAddress) + + s.Require().NoError(s.RunBeginThenEndBlock()) + pending := s.PendingRedelegations() + s.T().Logf("scheduling-case: pending redelegations=%d", len(pending)) + + events := s.ctx.EventManager().Events().ToABCIEvents() + s.Require().True(utils.ContainsEventType(events, poolrebalancertypes.EventTypeRedelegationStarted)) + s.Require().True(utils.ContainsEventType(events, poolrebalancertypes.EventTypeRebalanceSummary)) + + s.Require().NotEmpty(pending, "expected at least one pending redelegation") + + // Spot-check one entry shape. + ctx := s.network.GetContext() + found := false + for _, e := range pending { + if e.DelegatorAddress == s.poolDel.String() { + s.Require().Equal(s.bondDenom, e.Amount.Denom) + s.Require().True(e.CompletionTime.After(ctx.BlockTime())) + found = true + break + } + } + s.Require().True(found, "expected pool delegator entries in pending redelegations") + + s.Require().GreaterOrEqual(len(pending), 1) +} + +// TestSchedulingA_ReducesSourceOverweightInStakingState verifies a successful scheduling +// pass reduces overweight stake on the drifted source validator in staking state. 
+func (s *KeeperIntegrationTestSuite) TestSchedulingA_ReducesSourceOverweightInStakingState() { + params := s.DefaultEnabledParams(0, 1, sdkmath.ZeroInt()) + s.EnableRebalancer(params) + + src := s.validators[0] + srcAddr := src.OperatorAddress + s.DelegateExtraToValidator(src) + + before, _, err := s.poolKeeper.GetDelegatorStakeByValidator(s.ctx, s.poolDel) + s.Require().NoError(err) + beforeSrc := before[srcAddr] + s.Require().True(beforeSrc.IsPositive(), "expected positive source stake before scheduling") + + s.Require().NoError(s.RunBeginThenEndBlock()) + + after, _, err := s.poolKeeper.GetDelegatorStakeByValidator(s.ctx, s.poolDel) + s.Require().NoError(err) + afterSrc := after[srcAddr] + s.Require().True( + afterSrc.LT(beforeSrc), + "expected source stake to decrease after one rebalance op; before=%s after=%s", + beforeSrc.String(), + afterSrc.String(), + ) +} diff --git a/tests/integration/x/poolrebalancer/test_case_b_bounded_ops.go b/tests/integration/x/poolrebalancer/test_case_b_bounded_ops.go new file mode 100644 index 00000000..e588b5fe --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_case_b_bounded_ops.go @@ -0,0 +1,37 @@ +package poolrebalancer + +import ( + sdkmath "cosmossdk.io/math" +) + +// TestBoundedOpsPerBlock_MaxOpsIsRespected verifies that max_ops_per_block limits +// scheduling to a single operation in one EndBlock pass. +func (s *KeeperIntegrationTestSuite) TestBoundedOpsPerBlock_MaxOpsIsRespected() { + // Keep scheduler aggressive; cap block work at one op. 
+ params := s.DefaultEnabledParams( + 0, // rebalance_threshold_bp + 1, // max_ops_per_block + sdkmath.ZeroInt(), // max_move_per_op = 0 => no cap + ) + + s.EnableRebalancer(params) + + src := s.validators[0] + s.DelegateExtraToValidator(src) + s.T().Logf("bounded-ops: drift pushed to %s with maxOps=%d", src.OperatorAddress, params.MaxOpsPerBlock) + + s.Require().NoError(s.RunBeginThenEndBlock()) + + pending := s.PendingRedelegations() + s.T().Logf("bounded-ops: pending redelegations=%d", len(pending)) + s.Require().NotEmpty(pending, "expected at least one pending redelegation") + + // With max_ops_per_block=1 and no pre-existing entries, we should queue exactly one op. + // (addPendingRedelegation merges only when dst + completion match; with max_ops=1 it's a single move.) + s.Require().Len(pending, 1, "expected exactly one queued pending redelegation") + + // Quick shape check. + e := pending[0] + s.Require().Equal(s.poolDel.String(), e.DelegatorAddress) + s.Require().Equal(s.bondDenom, e.Amount.Denom) +} diff --git a/tests/integration/x/poolrebalancer/test_case_c_threshold.go b/tests/integration/x/poolrebalancer/test_case_c_threshold.go new file mode 100644 index 00000000..b787e542 --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_case_c_threshold.go @@ -0,0 +1,66 @@ +package poolrebalancer + +import ( + sdkmath "cosmossdk.io/math" +) + +// TestThresholdBehavior_HighThresholdPreventsScheduling verifies that a very high +// threshold suppresses scheduling even when drift exists. +func (s *KeeperIntegrationTestSuite) TestThresholdBehavior_HighThresholdPreventsScheduling() { + // Keep threshold effectively at "do nothing" level. + params := s.DefaultEnabledParams( + 10000, // rebalance_threshold_bp + 1, // max_ops_per_block + sdkmath.ZeroInt(), + ) + s.EnableRebalancer(params) + + // Add drift; with this threshold we still expect a no-op. 
+ src := s.validators[0] + s.DelegateExtraToValidator(src) + + s.Require().NoError(s.RunBeginThenEndBlock()) + + red := s.PendingRedelegations() + + s.Require().Len(red, 0, "expected no pending redelegations under high threshold") + +} + +// TestThresholdBehavior_BoundaryPair_NoOpThenSchedules verifies boundary behavior: +// same drift is ignored at high threshold and scheduled after threshold is lowered. +func (s *KeeperIntegrationTestSuite) TestThresholdBehavior_BoundaryPair_NoOpThenSchedules() { + // Same drift, two threshold values. + high := s.DefaultEnabledParams( + 10000, // threshold == total stake (effectively suppresses scheduling) + 1, + sdkmath.ZeroInt(), + ) + s.EnableRebalancer(high) + + src := s.validators[0] + s.DelegateExtraToValidator(src) + s.T().Logf( + "drift injected on %s (bp=%d), pending before: redelegations=%d", + src.OperatorAddress, high.RebalanceThresholdBp, len(s.PendingRedelegations()), + ) + + s.Require().NoError(s.RunBeginThenEndBlock()) + s.Require().Len(s.PendingRedelegations(), 0, "expected no scheduling under high threshold") + s.T().Logf( + "high-threshold pass stayed idle: redelegations=%d", + len(s.PendingRedelegations()), + ) + + // Lower threshold without changing the drift; scheduler should now engage. 
+ low := high + low.RebalanceThresholdBp = 0 + s.EnableRebalancer(low) + + s.Require().NoError(s.RunBeginThenEndBlock()) + s.Require().NotEmpty(s.PendingRedelegations(), "expected scheduling after lowering threshold") + s.T().Logf( + "after lowering to bp=%d: redelegations=%d", + low.RebalanceThresholdBp, len(s.PendingRedelegations()), + ) +} diff --git a/tests/integration/x/poolrebalancer/test_case_d_transitive_safety.go b/tests/integration/x/poolrebalancer/test_case_d_transitive_safety.go new file mode 100644 index 00000000..822c4c65 --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_case_d_transitive_safety.go @@ -0,0 +1,130 @@ +package poolrebalancer + +import ( + "time" + + sdkmath "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + + poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types" +) + +// TestTransitiveSafety_BlockedWhileDstImmature verifies that redelegation from a +// source validator is blocked while an immature redelegation already targets it. +func (s *KeeperIntegrationTestSuite) TestTransitiveSafety_BlockedWhileDstImmature() { + // Redelegation-only mode: this test exercises blocking semantics only. + params := s.DefaultEnabledParams( + 0, // threshold + 1, // max ops + sdkmath.ZeroInt(), + ) + s.EnableRebalancer(params) + + xVal := s.validators[0] + yVal := s.validators[1] + xSDKValAddr := s.MustValAddr(xVal.OperatorAddress) + + // Seed immature dst=xVal; any new src=xVal redelegation should be blocked. + immatureCompletion := s.ctx.BlockTime().Add(s.unbondingSec) + s.SeedPendingRedelegation(poolrebalancertypes.PendingRedelegation{ + DelegatorAddress: s.poolDel.String(), + SrcValidatorAddress: yVal.OperatorAddress, + DstValidatorAddress: xVal.OperatorAddress, + Amount: sdk.NewCoin(s.bondDenom, sdkmath.OneInt()), + CompletionTime: immatureCompletion.UTC(), + }) + + // Make xVal overweight so it is a real source candidate. 
+ s.DelegateExtraToValidator(xVal) + + // Guard against vacuous pass: xVal must be overweight and some dst must need stake. + deltas := s.ComputeCurrentDeltas() + xDelta, ok := deltas[xVal.OperatorAddress] + s.Require().True(ok, "expected xVal delta to exist") + s.Require().True(xDelta.IsNegative(), "expected xVal to be overweight/source candidate") + s.Require().True(s.HasPositiveDelta(deltas), "expected at least one underweight destination") + s.T().Logf( + "blocked-case setup: x=%s y=%s xDelta=%s hasDstNeedingStake=%t pendingBefore=%d", + xVal.OperatorAddress, yVal.OperatorAddress, xDelta.String(), s.HasPositiveDelta(deltas), len(s.PendingRedelegations()), + ) + + s.Require().NoError(s.RunBeginThenEndBlock()) + + pending := s.PendingRedelegations() + + // Core invariant: while dst=xVal is immature, no pending move may use src=xVal. + for _, e := range pending { + s.Require().NotEqual(xVal.OperatorAddress, e.SrcValidatorAddress, "found pending redelegation with src=xVal while dst=xVal is immature") + } + + // Seeded immature entry must still exist. + seedFound := false + for _, e := range pending { + if e.SrcValidatorAddress == yVal.OperatorAddress && e.DstValidatorAddress == xVal.OperatorAddress { + seedFound = true + break + } + } + s.Require().True(seedFound, "expected seeded immature redelegation into xVal to remain") + + // Immature condition should still hold at this point. + s.Require().True(s.poolKeeper.HasImmatureRedelegationTo(s.ctx, s.poolDel, xSDKValAddr, s.bondDenom)) + s.T().Logf("blocked-case result: pendingAfter=%d (no src=%s moves)", len(pending), xVal.OperatorAddress) +} + +// TestTransitiveSafety_UnblocksAfterDstMaturity verifies that once the immature +// destination entry matures, redelegation from that source can be scheduled again. +func (s *KeeperIntegrationTestSuite) TestTransitiveSafety_UnblocksAfterDstMaturity() { + // Same starting setup as blocked case. 
+ params := s.DefaultEnabledParams(0, 1, sdkmath.ZeroInt()) + s.EnableRebalancer(params) + + xVal := s.validators[0] + yVal := s.validators[1] + xSDKValAddr := s.MustValAddr(xVal.OperatorAddress) + + immatureCompletion := s.ctx.BlockTime().Add(s.unbondingSec) + s.SeedPendingRedelegation(poolrebalancertypes.PendingRedelegation{ + DelegatorAddress: s.poolDel.String(), + SrcValidatorAddress: yVal.OperatorAddress, + DstValidatorAddress: xVal.OperatorAddress, + Amount: sdk.NewCoin(s.bondDenom, sdkmath.OneInt()), + CompletionTime: immatureCompletion.UTC(), + }) + s.DelegateExtraToValidator(xVal) + + // Guard against vacuous pass before the blocked run. + deltas := s.ComputeCurrentDeltas() + xDelta, ok := deltas[xVal.OperatorAddress] + s.Require().True(ok, "expected xVal delta to exist") + s.Require().True(xDelta.IsNegative(), "expected xVal to be overweight/source candidate") + s.Require().True(s.HasPositiveDelta(deltas), "expected at least one underweight destination") + s.T().Logf( + "unblock-case setup: x=%s y=%s xDelta=%s hasDstNeedingStake=%t pendingBefore=%d", + xVal.OperatorAddress, yVal.OperatorAddress, xDelta.String(), s.HasPositiveDelta(deltas), len(s.PendingRedelegations()), + ) + + // First pass: still blocked by immature dst=xVal. + s.Require().NoError(s.RunBeginThenEndBlock()) + s.Require().True(s.poolKeeper.HasImmatureRedelegationTo(s.ctx, s.poolDel, xSDKValAddr, s.bondDenom)) + + // Move past completion so the seed can mature and get cleaned up. + // Only pending redelegation tracking entries are expected to mature here. + s.WithBlockTime(immatureCompletion.Add(1 * time.Second)) + s.Require().NoError(s.RunBeginThenEndBlock()) + + // Immature block should now be gone. + s.Require().False(s.poolKeeper.HasImmatureRedelegationTo(s.ctx, s.poolDel, xSDKValAddr, s.bondDenom)) + + // Once unblocked, scheduler should be free to pick src=xVal. 
+ pending := s.PendingRedelegations()
+ srcFound := false
+ for _, e := range pending {
+ if e.SrcValidatorAddress == xVal.OperatorAddress {
+ srcFound = true
+ break
+ }
+ }
+ s.Require().True(srcFound, "expected module to schedule a redelegation from xVal after maturity")
+ s.T().Logf("unblock-case result: pendingAfter=%d src=%s scheduled=%t", len(pending), xVal.OperatorAddress, srcFound)
+}
diff --git a/tests/integration/x/poolrebalancer/test_case_disabled_noop.go b/tests/integration/x/poolrebalancer/test_case_disabled_noop.go
new file mode 100644
index 00000000..190226c7
--- /dev/null
+++ b/tests/integration/x/poolrebalancer/test_case_disabled_noop.go
@@ -0,0 +1,28 @@
+package poolrebalancer
+
+import (
+ poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types"
+)
+
+// TestDisabledNoOp_NoPendingQueues verifies that an empty pool delegator address
+// disables rebalancing and leaves pending queues untouched.
+func (s *KeeperIntegrationTestSuite) TestDisabledNoOp_NoPendingQueues() {
+ ctx := s.network.GetContext()
+
+ // Explicitly disable by clearing pool delegator address.
+ s.EnableRebalancer(poolrebalancertypes.DefaultParams()) // baseline
+ p := s.DisabledParams()
+ s.EnableRebalancer(p)
+ s.T().Logf("disabled-case: pool delegator=%q", p.PoolDelegatorAddress)
+
+ s.Require().NoError(s.RunBeginThenEndBlock())
+
+ red := s.PendingRedelegations()
+ s.T().Logf("disabled-case: pending after EndBlock redelegations=%d", len(red))
+ s.Require().Len(red, 0)
+
+ // Sanity: ensure we did not accidentally enable it. 
+ params, err := s.poolKeeper.GetParams(ctx) + s.Require().NoError(err) + s.Require().Empty(params.PoolDelegatorAddress) +} diff --git a/tests/integration/x/poolrebalancer/test_case_g_long_horizon_convergence.go b/tests/integration/x/poolrebalancer/test_case_g_long_horizon_convergence.go new file mode 100644 index 00000000..38132d4a --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_case_g_long_horizon_convergence.go @@ -0,0 +1,112 @@ +package poolrebalancer + +import ( + "time" + + sdkmath "cosmossdk.io/math" +) + +// maxAbsDelta returns max(|delta|) across all validators. +func maxAbsDelta(deltas map[string]sdkmath.Int) sdkmath.Int { + max := sdkmath.ZeroInt() + for _, d := range deltas { + abs := d.Abs() + if abs.GT(max) { + max = abs + } + } + return max +} + +// TestLongHorizonConvergence_RedelegationOnly verifies repeated Begin+End passes +// with periodic maturity windows reduce drift to a small tolerance using redelegations only. +// Each iteration still calls BeginBlock, matching production ordering before EndBlock. +func (s *KeeperIntegrationTestSuite) TestLongHorizonConvergence_RedelegationOnly() { + params := s.DefaultEnabledParams( + 0, // threshold: schedule on any drift + 1, // force gradual per-pass progress to exercise long-horizon behavior + sdkmath.NewInt(100000000000000000), // cap per-op movement to require multiple iterations + ) + s.EnableRebalancer(params) + + // Create deterministic overweight drift on one source validator. + src := s.validators[0] + s.DelegateExtraToValidator(src) + + const ( + maxIters = 60 + maturityJumpEvery = 5 + // Keep practical tolerance to absorb truncation/rounding residue. + convergenceTolerance = int64(10) + // Guard against vacuous success: start from a non-trivial drift. + minInitialDrift = int64(1000) + // Ensure this is a long-horizon behavior test, not a one-iteration pass. 
+ minItersBeforeConverged = 3 + ) + tol := sdkmath.NewInt(convergenceTolerance) + minStart := sdkmath.NewInt(minInitialDrift) + + initialDeltas := s.ComputeCurrentDeltas() + initialMaxAbs := maxAbsDelta(initialDeltas) + s.Require().True(initialMaxAbs.IsPositive(), "expected initial non-zero drift") + s.Require().True( + initialMaxAbs.GTE(minStart), + "expected non-trivial initial drift; got %s want >= %s", + initialMaxAbs.String(), + minStart.String(), + ) + + converged := false + convergedAt := 0 + sawProgress := false + for i := 1; i <= maxIters; i++ { + s.Require().NoError(s.RunBeginThenEndBlock()) + + // Periodically move past unbonding window so queued ops can mature and cleanup can proceed. + if i%maturityJumpEvery == 0 { + s.WithBlockTime(s.ctx.BlockTime().Add(s.unbondingSec + time.Second)) + } + + deltas := s.ComputeCurrentDeltas() + curMaxAbs := maxAbsDelta(deltas) + pendingRed := len(s.PendingRedelegations()) + s.T().Logf( + "convergence iter=%d maxAbsDelta=%s tol=%s pending(redelegations=%d)", + i, curMaxAbs.String(), tol.String(), pendingRed, + ) + + if curMaxAbs.LT(initialMaxAbs) { + sawProgress = true + } + + if curMaxAbs.LTE(tol) { + converged = true + convergedAt = i + break + } + } + + s.Require().True( + converged, + "expected convergence within %d iterations (initial maxAbs=%s, tolerance=%s)", + maxIters, + initialMaxAbs.String(), + tol.String(), + ) + s.Require().GreaterOrEqual( + convergedAt, + minItersBeforeConverged, + "converged too quickly at iter=%d; expected long-horizon behavior (>= %d iters)", + convergedAt, + minItersBeforeConverged, + ) + s.Require().True( + sawProgress, + "expected at least one measurable improvement from initial maxAbsDelta=%s", + initialMaxAbs.String(), + ) + + // Final maturity pass to ensure no stale queue buildup remains. 
+ s.WithBlockTime(s.ctx.BlockTime().Add(s.unbondingSec + time.Second)) + s.Require().NoError(s.RunBeginThenEndBlock()) +} diff --git a/tests/integration/x/poolrebalancer/test_case_h_previous_block_slash.go b/tests/integration/x/poolrebalancer/test_case_h_previous_block_slash.go new file mode 100644 index 00000000..052176c4 --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_case_h_previous_block_slash.go @@ -0,0 +1,93 @@ +package poolrebalancer + +import ( + sdkmath "cosmossdk.io/math" +) + +// TestPreviousBlockSlash_PrioritizesRedelegation verifies that a validator slashed in block H +// is chosen as the source for the first rebalance op scheduled in block H+1. +func (s *KeeperIntegrationTestSuite) TestPreviousBlockSlash_PrioritizesRedelegation() { + params := s.DefaultEnabledParams( + 0, + 1, + sdkmath.ZeroInt(), + ) + s.EnableRebalancer(params) + + slashedVal := s.validators[0] + otherOverweight := s.validators[1] + s.DelegateExtraToValidator(slashedVal) + s.DelegateExtraToValidator(otherOverweight) + s.RecordSlashEvent(slashedVal) + + s.WithBlockHeight(s.ctx.BlockHeight() + 1) + s.Require().NoError(s.RunBeginThenEndBlock()) + + redels := s.PendingRedelegations() + s.Require().Len(redels, 1, "max_ops_per_block=1 should queue exactly one redelegation") + s.Require().Equal(slashedVal.OperatorAddress, redels[0].SrcValidatorAddress, "previous-block slashed validator should be the first source") +} + +// TestPreviousBlockSlash_ExcludedFromDestinations verifies that the rebalance logic does not +// choose a slashed validator as a same-block destination. 
+func (s *KeeperIntegrationTestSuite) TestPreviousBlockSlash_ExcludedFromDestinations() { + params := s.DefaultEnabledParams( + 0, + 2, + sdkmath.ZeroInt(), + ) + s.EnableRebalancer(params) + + src := s.validators[0] + slashedDst := s.validators[1] + s.DelegateExtraToValidator(src) + s.RecordSlashEvent(slashedDst) + + s.WithBlockHeight(s.ctx.BlockHeight() + 1) + s.Require().NoError(s.RunBeginThenEndBlock()) + + redels := s.PendingRedelegations() + s.Require().NotEmpty(redels, "expected at least one pending redelegation") + for _, r := range redels { + s.Require().NotEqual(slashedDst.OperatorAddress, r.DstValidatorAddress, "slashed validator must not be used as destination") + } +} + +// TestPreviousBlockSlash_RespectsMaxOps verifies that slash-priority still obeys max_ops_per_block. +func (s *KeeperIntegrationTestSuite) TestPreviousBlockSlash_RespectsMaxOps() { + params := s.DefaultEnabledParams( + 0, + 1, + sdkmath.ZeroInt(), + ) + s.EnableRebalancer(params) + + slashedVal := s.validators[0] + s.DelegateExtraToValidator(slashedVal) + s.RecordSlashEvent(slashedVal) + + s.WithBlockHeight(s.ctx.BlockHeight() + 1) + s.Require().NoError(s.RunBeginThenEndBlock()) + + s.Require().Len(s.PendingRedelegations(), 1, "slash-priority must still honor max_ops_per_block") +} + +// TestPreviousBlockSlash_NoSlashRegression verifies that existing scheduling behavior still works +// when no slash event was recorded in the previous block. 
+func (s *KeeperIntegrationTestSuite) TestPreviousBlockSlash_NoSlashRegression() { + params := s.DefaultEnabledParams( + 0, + 1, + sdkmath.ZeroInt(), + ) + s.EnableRebalancer(params) + + src := s.validators[0] + s.DelegateExtraToValidator(src) + + s.Require().NoError(s.RunBeginThenEndBlock()) + + redels := s.PendingRedelegations() + s.Require().Len(redels, 1, "expected normal scheduling without prior slash") + s.Require().Equal(src.OperatorAddress, redels[0].SrcValidatorAddress) +} diff --git a/tests/integration/x/poolrebalancer/test_case_update_params_integration.go b/tests/integration/x/poolrebalancer/test_case_update_params_integration.go new file mode 100644 index 00000000..6ebfd098 --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_case_update_params_integration.go @@ -0,0 +1,69 @@ +package poolrebalancer + +import ( + "bytes" + + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + + poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types" +) + +// TestUpdateParams_RejectsInvalidAuthority verifies that MsgUpdateParams enforces +// module authority and rejects unauthorized callers. +func (s *KeeperIntegrationTestSuite) TestUpdateParams_RejectsInvalidAuthority() { + params := s.DefaultEnabledParams(0, 1, sdkmath.ZeroInt()) + + msg := &poolrebalancertypes.MsgUpdateParams{ + Authority: sdk.AccAddress(bytes.Repeat([]byte{8}, 20)).String(), + Params: params, + } + + _, err := s.poolKeeper.UpdateParams(s.ctx, msg) + s.Require().Error(err) + s.Require().Contains(err.Error(), "invalid authority") + s.T().Logf("update-params auth-check: invalid authority rejected as expected") +} + +// TestUpdateParams_ValidAuthorityChangesSchedulingBehavior verifies that a valid +// params update changes runtime scheduling behavior in the same test setup. 
+func (s *KeeperIntegrationTestSuite) TestUpdateParams_ValidAuthorityChangesSchedulingBehavior() { + authority := authtypes.NewModuleAddress(govtypes.ModuleName).String() + + // Reuse the same drift across both phases. + src := s.validators[0] + s.DelegateExtraToValidator(src) + s.T().Logf("update-params flow: drift pushed to %s", src.OperatorAddress) + + // Phase 1: high threshold, expect no scheduling. + high := s.DefaultEnabledParams( + 10000, // threshold suppresses all scheduling + 1, + sdkmath.ZeroInt(), + ) + _, err := s.poolKeeper.UpdateParams(s.ctx, &poolrebalancertypes.MsgUpdateParams{ + Authority: authority, + Params: high, + }) + s.Require().NoError(err) + + s.Require().NoError(s.RunBeginThenEndBlock()) + s.Require().Len(s.PendingRedelegations(), 0, "expected no scheduling under high threshold") + s.T().Logf("update-params flow: high threshold kept queues empty") + + // Phase 2: lower threshold, same drift should now schedule. + low := high + low.RebalanceThresholdBp = 0 + _, err = s.poolKeeper.UpdateParams(s.ctx, &poolrebalancertypes.MsgUpdateParams{ + Authority: authority, + Params: low, + }) + s.Require().NoError(err) + + s.Require().NoError(s.RunBeginThenEndBlock()) + s.Require().NotEmpty(s.PendingRedelegations(), "expected scheduling after lowering threshold") + s.T().Logf("update-params flow: low threshold scheduled %d redelegations", len(s.PendingRedelegations())) +} diff --git a/tests/integration/x/poolrebalancer/test_endblock_helpers.go b/tests/integration/x/poolrebalancer/test_endblock_helpers.go new file mode 100644 index 00000000..238dfed2 --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_endblock_helpers.go @@ -0,0 +1,38 @@ +package poolrebalancer + +import ( + "time" + + mod "github.com/cosmos/evm/x/poolrebalancer" +) + +// RunEndBlock runs only poolrebalancer EndBlocker on s.ctx (same store view as direct keeper tests). +// +// Prefer RunBeginThenEndBlock for normal cases to match production ordering. 
+// Use RunEndBlock only to assert EndBlock-only failure (missing slash snapshot) or for +// focused cases where BeginBlock effects are intentionally excluded. +func (s *KeeperIntegrationTestSuite) RunEndBlock() error { + return mod.EndBlocker(s.ctx, s.poolKeeper) +} + +// RunBeginThenEndBlock runs poolrebalancer BeginBlocker then EndBlocker on s.ctx. +// This matches production ABCI ordering and should be the default for integration cases. +// It is required for tests that rely on previous-block slash snapshot semantics. +func (s *KeeperIntegrationTestSuite) RunBeginThenEndBlock() error { + if err := mod.BeginBlocker(s.ctx, s.poolKeeper); err != nil { + return err + } + return mod.EndBlocker(s.ctx, s.poolKeeper) +} + +// WithBlockTime moves the suite context clock without advancing the full network. +// It is used when we only need time-based maturity behavior. +func (s *KeeperIntegrationTestSuite) WithBlockTime(t time.Time) { + s.ctx = s.ctx.WithBlockTime(t) +} + +// WithBlockHeight moves the suite context height without refreshing from the network. +// It is used when tests need previous-block semantics over the same in-memory store view. +func (s *KeeperIntegrationTestSuite) WithBlockHeight(h int64) { + s.ctx = s.ctx.WithBlockHeight(h) +} diff --git a/tests/integration/x/poolrebalancer/test_helpers.go b/tests/integration/x/poolrebalancer/test_helpers.go new file mode 100644 index 00000000..3297f10c --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_helpers.go @@ -0,0 +1,136 @@ +package poolrebalancer + +import ( + "cosmossdk.io/math" + "sort" + sdkmath "cosmossdk.io/math" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + sdk "github.com/cosmos/cosmos-sdk/types" + distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + + poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types" +) + +// PendingRedelegations returns all pending redelegations, failing test on query error. 
+func (s *KeeperIntegrationTestSuite) PendingRedelegations() []poolrebalancertypes.PendingRedelegation { + out, err := s.poolKeeper.GetAllPendingRedelegations(s.ctx) + s.Require().NoError(err) + return out +} + +// DelegateExtraToValidator creates deterministic drift by adding extra stake on one validator. +// Amount selection tries to be large enough to survive truncation in stake math. +func (s *KeeperIntegrationTestSuite) DelegateExtraToValidator(val stakingtypes.Validator) { + // Rebalancer math uses truncated token amounts; too-small moves can disappear. + free := s.network.App.GetBankKeeper().GetBalance(s.ctx, s.poolDel, s.bondDenom).Amount + s.Require().True(free.IsPositive(), "pool delegator free balance must be > 0") + + stakeByVal, _, err := s.poolKeeper.GetDelegatorStakeByValidator(s.ctx, s.poolDel) + s.Require().NoError(err) + base := stakeByVal[val.OperatorAddress] + s.Require().True(base.IsPositive(), "expected base stake for chosen validator") + + // Use roughly existing stake as drift target, bounded by free balance. + extra := base + if free.LT(extra) { + extra = free + } + + s.Require().True(extra.IsPositive(), "drift delegation amount must be > 0") + + _, err = s.network.App.GetStakingKeeper().Delegate( + s.ctx, + s.poolDel, + extra, + stakingtypes.Unspecified, + val, + true, + ) + s.Require().NoError(err) +} + +// SeedPendingRedelegation inserts a pending redelegation fixture entry. +func (s *KeeperIntegrationTestSuite) SeedPendingRedelegation(entry poolrebalancertypes.PendingRedelegation) { + s.Require().NoError(s.poolKeeper.SetPendingRedelegation(s.ctx, entry)) +} + +// ComputeCurrentDeltas mirrors ProcessRebalance inputs and returns target-current deltas. 
+func (s *KeeperIntegrationTestSuite) ComputeCurrentDeltas() map[string]sdkmath.Int { + targetVals, err := s.poolKeeper.GetTargetBondedValidators(s.ctx) + s.Require().NoError(err) + s.Require().NotEmpty(targetVals) + + current, total, err := s.poolKeeper.GetDelegatorStakeByValidator(s.ctx, s.poolDel) + s.Require().NoError(err) + s.Require().True(total.IsPositive()) + + target, err := s.poolKeeper.EqualWeightTarget(total, targetVals) + s.Require().NoError(err) + + params, err := s.poolKeeper.GetParams(s.ctx) + s.Require().NoError(err) + + deltas, err := s.poolKeeper.ComputeDeltas(target, current, total, params.RebalanceThresholdBp) + s.Require().NoError(err) + return deltas +} + +// HasPositiveDelta reports whether any validator is currently underweight. +func (s *KeeperIntegrationTestSuite) HasPositiveDelta(deltas map[string]sdkmath.Int) bool { + for _, d := range deltas { + if d.IsPositive() { + return true + } + } + return false +} + +// OverweightValidatorSet builds a quick lookup of validators with negative deltas. +func (s *KeeperIntegrationTestSuite) OverweightValidatorSet(deltas map[string]sdkmath.Int) map[string]struct{} { + out := make(map[string]struct{}) + for val, d := range deltas { + if d.IsNegative() { + out[val] = struct{}{} + } + } + return out +} + +// MustValAddr parses valoper bech32 and fails the test on invalid input. +func (s *KeeperIntegrationTestSuite) MustValAddr(bech32 string) sdk.ValAddress { + val, err := sdk.ValAddressFromBech32(bech32) + s.Require().NoError(err) + return val +} + +// RecordSlashEvent stores a distribution slash event for the validator at the current block height. 
+func (s *KeeperIntegrationTestSuite) RecordSlashEvent(val stakingtypes.Validator) { + valAddr := s.MustValAddr(val.OperatorAddress) + err := s.network.App.GetDistrKeeper().SetValidatorSlashEvent( + s.ctx, + valAddr, + uint64(s.ctx.BlockHeight()), + 1, + distributiontypes.ValidatorSlashEvent{ + ValidatorPeriod: uint64(s.ctx.BlockHeight()), + Fraction: math.LegacyNewDec(1), + }, + ) + s.Require().NoError(err) +} + +// RedelegationSrcDstPairs returns queued redelegation pairs sorted by source then destination. +func (s *KeeperIntegrationTestSuite) RedelegationSrcDstPairs() [][2]string { + redels := s.PendingRedelegations() + out := make([][2]string, 0, len(redels)) + for _, r := range redels { + out = append(out, [2]string{r.SrcValidatorAddress, r.DstValidatorAddress}) + } + sort.Slice(out, func(i, j int) bool { + if out[i][0] == out[j][0] { + return out[i][1] < out[j][1] + } + return out[i][0] < out[j][0] + }) + return out +} diff --git a/tests/integration/x/poolrebalancer/test_suite.go b/tests/integration/x/poolrebalancer/test_suite.go new file mode 100644 index 00000000..6d1439c0 --- /dev/null +++ b/tests/integration/x/poolrebalancer/test_suite.go @@ -0,0 +1,152 @@ +package poolrebalancer + +import ( + "time" + + "github.com/stretchr/testify/suite" + + "github.com/cosmos/evm/testutil/integration/evm/network" + testkeyring "github.com/cosmos/evm/testutil/keyring" + + poolrebalancerkeeper "github.com/cosmos/evm/x/poolrebalancer/keeper" + poolrebalancertypes "github.com/cosmos/evm/x/poolrebalancer/types" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +type KeeperIntegrationTestSuite struct { + suite.Suite + + create network.CreateEvmApp + options 
[]network.ConfigOption + + network *network.UnitTestNetwork + keyring testkeyring.Keyring + poolKeeper poolrebalancerkeeper.Keeper + ctx sdk.Context + + poolDel sdk.AccAddress + validators []stakingtypes.Validator + bondDenom string + unbondingSec time.Duration + maxEntries uint32 +} + +// NewKeeperIntegrationTestSuite wires app factory + optional network config for each test case. +func NewKeeperIntegrationTestSuite(create network.CreateEvmApp, options ...network.ConfigOption) *KeeperIntegrationTestSuite { + return &KeeperIntegrationTestSuite{ + create: create, + options: options, + } +} + +func (s *KeeperIntegrationTestSuite) SetupTest() { + if s.create == nil { + panic("Create app must be set") + } + + s.keyring = testkeyring.New(2) + opts := []network.ConfigOption{ + network.WithPreFundedAccounts(s.keyring.GetAllAccAddrs()...), + // Short unbonding belongs in genesis: tests that call Commit/FinalizeBlock need the value in + // loaded state, not only from a later SetParams on an in-memory ctx. + network.WithStakingUnbondingTime(30 * time.Second), + } + opts = append(opts, s.options...) + + s.network = network.NewUnitTestNetwork(s.create, opts...) + s.ctx = s.network.GetContext() + + // Keep unbonding short so maturity/cleanup tests run quickly. + s.unbondingSec = 30 * time.Second + s.maxEntries = 100 + + s.configureStakingParamsForTests() + s.configurePoolKeeper() + s.captureBaselineInfo() +} + +func (s *KeeperIntegrationTestSuite) configureStakingParamsForTests() { + sk := s.network.App.GetStakingKeeper() + sp, err := sk.GetParams(s.ctx) + s.Require().NoError(err) + sp.UnbondingTime = s.unbondingSec + sp.MaxEntries = s.maxEntries + s.Require().NoError(sk.SetParams(s.ctx, sp)) +} + +// configurePoolKeeper builds a keeper bound to the same module KV stores as the app under test. 
+func (s *KeeperIntegrationTestSuite) configurePoolKeeper() {
+	// Bind to the app's own module store keys so keeper reads/writes hit the same state
+	// that Commit/FinalizeBlock persist.
+	poolKey := s.network.App.GetKey(poolrebalancertypes.StoreKey)
+	storeService := runtime.NewKVStoreService(poolKey)
+
+	authority := authtypes.NewModuleAddress(govtypes.ModuleName)
+	// No account keeper: skips validatePoolDelegatorAddress account/pubkey checks that would reject the
+	// prefunded test keyring. Stub EVM still reports IsContract true for the pool address.
+	s.poolKeeper = poolrebalancerkeeper.NewKeeper(
+		s.network.App.AppCodec(),
+		storeService,
+		s.network.App.GetTKey(poolrebalancertypes.TransientStoreKey),
+		s.network.App.GetStakingKeeper(),
+		stakingkeeper.NewQuerier(s.network.App.GetStakingKeeper()),
+		s.network.App.GetDistrKeeper(),
+		authority,
+		rebalanceIntegrationStubEVM{},
+		nil, // account keeper intentionally omitted (see note above)
+	)
+}
+
+// captureBaselineInfo caches common fixtures used by most test cases.
+func (s *KeeperIntegrationTestSuite) captureBaselineInfo() {
+	s.validators = s.network.GetValidators()
+	s.Require().NotEmpty(s.validators, "network should initialize bonded validators")
+
+	bondDenom, err := s.network.App.GetStakingKeeper().BondDenom(s.ctx)
+	s.Require().NoError(err)
+	s.bondDenom = bondDenom
+
+	// UnitTestNetwork seeds delegations for the first test account; use it as pool delegator.
+	s.poolDel = s.keyring.GetAccAddr(0)
+
+	// No stake would make rebalance tests vacuous.
+	_, total, err := s.poolKeeper.GetDelegatorStakeByValidator(s.ctx, s.poolDel)
+	s.Require().NoError(err)
+	s.Require().True(total.IsPositive(), "expected pool delegator stake to be > 0")
+}
+
+// NextBlock0 advances one block with no extra time offset (block time unchanged) and refreshes s.ctx
+// from the network so keeper calls see the new height and committed state.
+func (s *KeeperIntegrationTestSuite) NextBlock0() {
+	s.Require().NoError(s.network.NextBlockAfter(0))
+	s.ctx = s.network.GetContext()
+}
+
+// EnableRebalancer writes module params for the current test.
+func (s *KeeperIntegrationTestSuite) EnableRebalancer(params poolrebalancertypes.Params) {
+	s.Require().NoError(s.poolKeeper.SetParams(s.ctx, params))
+}
+
+// DisabledParams returns default params with pool delegator cleared.
+// An empty PoolDelegatorAddress is how the module represents "rebalancer off".
+func (s *KeeperIntegrationTestSuite) DisabledParams() poolrebalancertypes.Params {
+	p := poolrebalancertypes.DefaultParams()
+	p.PoolDelegatorAddress = ""
+	return p
+}
+
+// DefaultEnabledParams returns a baseline enabled config with per-test overrides.
+// The pool delegator is the seeded test account and every bonded validator is targeted,
+// so the only knobs a test varies are the threshold and the per-block/per-op caps.
+func (s *KeeperIntegrationTestSuite) DefaultEnabledParams(thresholdBP uint32, maxOpsPerBlock uint32, maxMovePerOp sdkmath.Int) poolrebalancertypes.Params {
+	p := poolrebalancertypes.DefaultParams()
+	p.PoolDelegatorAddress = s.poolDel.String()
+	p.MaxTargetValidators = uint32(len(s.validators))
+	p.RebalanceThresholdBp = thresholdBP
+	p.MaxOpsPerBlock = maxOpsPerBlock
+	p.MaxMovePerOp = maxMovePerOp
+	return p
+}
diff --git a/testutil/integration/evm/network/config.go b/testutil/integration/evm/network/config.go
index c49cee12..183b6ac3 100644
--- a/testutil/integration/evm/network/config.go
+++ b/testutil/integration/evm/network/config.go
@@ -3,6 +3,7 @@ package network
 import (
 	"fmt"
 	"math/big"
+	"time"
 
 	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
@@ -45,6 +46,9 @@ type Config struct {
 	otherCoinDenoms   []string
 	preFundedAccounts []sdktypes.AccAddress
 	balances          []banktypes.Balance
+
+	// If > 0, overrides staking genesis UnbondingTime (DefaultParams uses the SDK default, ~21d).
+	stakingUnbondingTime time.Duration
 }
 
 type CustomGenesisState map[string]interface{}
@@ -213,3 +217,13 @@ func WithConsensusParams(params *cmtproto.ConsensusParams) ConfigOption {
 	}
 }
+
+// WithStakingUnbondingTime sets staking UnbondingTime in the integration network genesis.
+// Poolrebalancer and other suites that call FinalizeBlock between setup steps need a short
+// unbonding window here: post-init keeper SetParams may not survive Commit the way direct
+// EndBlock/DeliverTx paths do.
+func WithStakingUnbondingTime(d time.Duration) ConfigOption { + return func(cfg *Config) { + cfg.stakingUnbondingTime = d + } +} diff --git a/testutil/integration/evm/network/network.go b/testutil/integration/evm/network/network.go index 907af989..97b617df 100644 --- a/testutil/integration/evm/network/network.go +++ b/testutil/integration/evm/network/network.go @@ -140,9 +140,10 @@ func (n *IntegrationNetwork) configureAndInitChain(evmApp evm.EvmApp) error { delegations := createDelegations(validators, genAccounts[0].GetAddress()) stakingParams := StakingCustomGenesisState{ - denom: n.cfg.chainCoins.BaseDenom(), - validators: validators, - delegations: delegations, + denom: n.cfg.chainCoins.BaseDenom(), + unbondingTime: n.cfg.stakingUnbondingTime, + validators: validators, + delegations: delegations, } govParams := GovCustomGenesisState{ denom: n.cfg.chainCoins.BaseDenom(), diff --git a/testutil/integration/evm/network/setup.go b/testutil/integration/evm/network/setup.go index 5a7a74d9..231cb5c4 100644 --- a/testutil/integration/evm/network/setup.go +++ b/testutil/integration/evm/network/setup.go @@ -274,6 +274,9 @@ func getValidatorsSlashingGen(validators []stakingtypes.Validator, sk slashingty type StakingCustomGenesisState struct { denom string + // unbondingTime, when > 0, overrides stakingtypes.DefaultParams().UnbondingTime. 
+ unbondingTime time.Duration + validators []stakingtypes.Validator delegations []stakingtypes.Delegation } @@ -283,6 +286,9 @@ func setDefaultStakingGenesisState(cosmosEVMApp evm.EvmApp, genesisState testuti // Set staking params stakingParams := stakingtypes.DefaultParams() stakingParams.BondDenom = overwriteParams.denom + if overwriteParams.unbondingTime > 0 { + stakingParams.UnbondingTime = overwriteParams.unbondingTime + } stakingGenesis := stakingtypes.NewGenesisState( stakingParams, diff --git a/testutil/integration/evm/utils/params.go b/testutil/integration/evm/utils/params.go index 76f62b38..75f0b957 100644 --- a/testutil/integration/evm/utils/params.go +++ b/testutil/integration/evm/utils/params.go @@ -23,8 +23,6 @@ type UpdateParamsInput struct { Params interface{} } -var authority = authtypes.NewModuleAddress(govtypes.ModuleName).String() - // UpdateEvmParams helper function to update the EVM module parameters // It submits an update params proposal, votes for it, and waits till it passes func UpdateEvmParams(input UpdateParamsInput) error { @@ -70,6 +68,8 @@ func updateModuleParams[T interface{}](input UpdateParamsInput, moduleName strin // createProposalMsg creates the module-specific update params message func createProposalMsg(params interface{}, name string) sdk.Msg { + authority := authtypes.NewModuleAddress(govtypes.ModuleName).String() + switch name { case evmtypes.ModuleName: return &evmtypes.MsgUpdateParams{Authority: authority, Params: params.(evmtypes.Params)} diff --git a/x/poolrebalancer/abci.go b/x/poolrebalancer/abci.go new file mode 100644 index 00000000..d4b1568d --- /dev/null +++ b/x/poolrebalancer/abci.go @@ -0,0 +1,52 @@ +package poolrebalancer + +import ( + "github.com/cosmos/evm/x/poolrebalancer/keeper" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// ABCI: previous-block slash signals are snapshot in BeginBlock for EndBlock use. 
+//
+// BeginBlock: PreparePreviousBlockSlashedValidators snapshots relevant validators with
+// distribution slash events at height blockHeight-1. Ordered after slashing/evidence so
+// recent slash state is visible before EndBlock rebalance consumes it. Errors halt the block.
+//
+// EndBlock: CompletePendingRedelegations removes matured tracking rows. ProcessRebalance then
+// uses the slash snapshot to avoid same-block destinations into recently slashed validators and
+// to prioritize moving stake away from them. MaybeReconcileCommunityPoolStakedBuckets runs before
+// automation/rebalance; MaybeReconcileCommunityPoolStakedBucketsSecondPass may run after successful
+// ProcessRebalance when test hook enables it. ABCI logs reconcile/automation/rebalance failures only — see
+// docs/poolrebalancer/community_pool_runbook.md.
+
+// BeginBlocker snapshots previous-block slash signals into transient store.
+// A non-nil return propagates to the module manager and halts the block (see comment above).
+func BeginBlocker(ctx sdk.Context, k keeper.Keeper) error {
+	if err := k.PreparePreviousBlockSlashedValidators(ctx); err != nil {
+		// NOTE(review): the error is both logged here and returned (so the module manager
+		// reports it again) — confirm the double reporting is intended for operator visibility.
+		ctx.Logger().Error("poolrebalancer: prepare previous-block slashed validators failed", "err", err)
+		return err
+	}
+	return nil
+}
+
+// EndBlocker: (1) CompletePendingRedelegations — strict (halt on error).
+// (2) pre-reconcile, (3) automation, (4) rebalance (+ optional second reconcile) — best-effort (log only).
+func EndBlocker(ctx sdk.Context, k keeper.Keeper) error { + if err := k.CompletePendingRedelegations(ctx); err != nil { + ctx.Logger().Error("poolrebalancer: complete pending redelegations failed", "err", err) + return err + } + if err := k.MaybeReconcileCommunityPoolStakedBuckets(ctx); err != nil { + ctx.Logger().Error("poolrebalancer: community pool total staked reconcile failed", "err", err) + } + if err := k.MaybeRunCommunityPoolAutomation(ctx); err != nil { + ctx.Logger().Error("poolrebalancer: community pool automation failed", "err", err) + } + if err := k.ProcessRebalance(ctx); err != nil { + ctx.Logger().Error("poolrebalancer: process rebalance failed", "err", err) + } else { + if err := k.MaybeReconcileCommunityPoolStakedBucketsSecondPass(ctx); err != nil { + ctx.Logger().Error("poolrebalancer: community pool staked buckets second reconcile failed", "err", err) + } + } + return nil +} diff --git a/x/poolrebalancer/abci_test.go b/x/poolrebalancer/abci_test.go new file mode 100644 index 00000000..330d8dc9 --- /dev/null +++ b/x/poolrebalancer/abci_test.go @@ -0,0 +1,131 @@ +package poolrebalancer + +import ( + "bytes" + "context" + "math/big" + "testing" + "time" + + "cosmossdk.io/math" + storetypes "cosmossdk.io/store/types" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + evmtypes "github.com/cosmos/evm/x/vm/types" + + "github.com/cosmos/evm/x/poolrebalancer/keeper" + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +type abciMockStakingKeeper struct { + vals []stakingtypes.Validator + delegations []stakingtypes.Delegation +} + +func (m abciMockStakingKeeper) GetBondedValidatorsByPower(context.Context) 
([]stakingtypes.Validator, error) { + return m.vals, nil +} +func (m abciMockStakingKeeper) GetDelegatorDelegations(context.Context, sdk.AccAddress, uint16) ([]stakingtypes.Delegation, error) { + return m.delegations, nil +} +func (abciMockStakingKeeper) GetValidator(context.Context, sdk.ValAddress) (stakingtypes.Validator, error) { + return stakingtypes.Validator{}, nil +} +func (abciMockStakingKeeper) GetDelegation(context.Context, sdk.AccAddress, sdk.ValAddress) (stakingtypes.Delegation, error) { + return stakingtypes.Delegation{}, nil +} +func (abciMockStakingKeeper) BeginRedelegation(context.Context, sdk.AccAddress, sdk.ValAddress, sdk.ValAddress, math.LegacyDec) (time.Time, error) { + return time.Time{}, nil +} +func (abciMockStakingKeeper) UnbondingTime(context.Context) (time.Duration, error) { return time.Hour, nil } +func (abciMockStakingKeeper) BondDenom(context.Context) (string, error) { return "stake", nil } +func (abciMockStakingKeeper) DelegatorDelegations(context.Context, *stakingtypes.QueryDelegatorDelegationsRequest) (*stakingtypes.QueryDelegatorDelegationsResponse, error) { + return &stakingtypes.QueryDelegatorDelegationsResponse{}, nil +} + +type abciMockEVM struct{ methods []string } + +func (m *abciMockEVM) CallEVM( + _ sdk.Context, + _ abi.ABI, + _, _ common.Address, + _ bool, + _ *big.Int, + method string, + _ ...any, +) (*evmtypes.MsgEthereumTxResponse, error) { + m.methods = append(m.methods, method) + var ret []byte + if method == "totalStaked" { + meth := types.CommunityPoolABI.Methods["totalStaked"] + enc, err := meth.Outputs.Pack(big.NewInt(1)) + if err != nil { + return nil, err + } + ret = enc + } + return &evmtypes.MsgEthereumTxResponse{Ret: ret}, nil +} +func (*abciMockEVM) IsContract(sdk.Context, common.Address) bool { return true } + +func countMethod(methods []string, name string) int { + n := 0 + for _, m := range methods { + if m == name { + n++ + } + } + return n +} + +func TestEndBlocker_UsesBondedOnlyReconcileMethod(t 
*testing.T) { + storeKey := storetypes.NewKVStoreKey(types.ModuleName) + tKey := storetypes.NewTransientStoreKey("transient_test") + ctx := testutil.DefaultContext(storeKey, tKey) + storeService := runtime.NewKVStoreService(storeKey) + cdc := moduletestutil.MakeTestEncodingConfig().Codec + authority := sdk.AccAddress(bytes.Repeat([]byte{9}, 20)) + evm := &abciMockEVM{} + sk := abciMockStakingKeeper{} + k := keeper.NewKeeper(cdc, storeService, tKey, sk, sk, nil, authority, evm, nil) + + params := types.DefaultParams() + params.PoolDelegatorAddress = sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String() + require.NoError(t, k.SetParams(ctx, params)) + require.NoError(t, k.MaybeReconcileCommunityPoolStakedBuckets(ctx)) + + require.Contains(t, evm.methods, "reconcileTotalStaked") +} + +func TestEndBlocker_SecondReconcileOnlyWhenSecondPassEnabled(t *testing.T) { + storeKey := storetypes.NewKVStoreKey(types.ModuleName) + tKey := storetypes.NewTransientStoreKey("transient_test") + ctx := testutil.DefaultContext(storeKey, tKey).WithBlockHeight(40) + storeService := runtime.NewKVStoreService(storeKey) + cdc := moduletestutil.MakeTestEncodingConfig().Codec + authority := sdk.AccAddress(bytes.Repeat([]byte{9}, 20)) + sk := abciMockStakingKeeper{} + + evmDisabled := &abciMockEVM{} + kDisabled := keeper.NewKeeper(cdc, storeService, tKey, sk, sk, nil, authority, evmDisabled, nil) + params := types.DefaultParams() + params.PoolDelegatorAddress = sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String() + require.NoError(t, kDisabled.SetParams(ctx, params)) + kDisabled.SetCommunityPoolReconcileSecondPassForTesting(false) + require.NoError(t, EndBlocker(ctx, kDisabled)) + require.Equal(t, 1, countMethod(evmDisabled.methods, "reconcileTotalStaked")) + + evmEnabled := &abciMockEVM{} + kEnabled := keeper.NewKeeper(cdc, storeService, tKey, sk, sk, nil, authority, evmEnabled, nil) + require.NoError(t, kEnabled.SetParams(ctx, params)) + 
kEnabled.SetCommunityPoolReconcileSecondPassForTesting(true) + require.NoError(t, EndBlocker(ctx, kEnabled)) + require.GreaterOrEqual(t, countMethod(evmEnabled.methods, "reconcileTotalStaked"), 2) +} diff --git a/x/poolrebalancer/client/cli/query.go b/x/poolrebalancer/client/cli/query.go new file mode 100644 index 00000000..0d8fb7f1 --- /dev/null +++ b/x/poolrebalancer/client/cli/query.go @@ -0,0 +1,87 @@ +package cli + +import ( + "context" + + "github.com/spf13/cobra" + + "github.com/cosmos/evm/x/poolrebalancer/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" +) + +// GetQueryCmd returns the root query command for the poolrebalancer module. +func GetQueryCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Poolrebalancer query commands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetParamsCmd(), + GetPendingRedelegationsCmd(), + ) + return cmd +} + +func GetParamsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "Query module params", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + res, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + return cmd +} + +func GetPendingRedelegationsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "pending-redelegations", + Short: "List pending redelegations", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return 
err + } + + res, err := queryClient.PendingRedelegations(context.Background(), &types.QueryPendingRedelegationsRequest{ + Pagination: pageReq, + }) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "pending-redelegations") + return cmd +} diff --git a/x/poolrebalancer/genesis.go b/x/poolrebalancer/genesis.go new file mode 100644 index 00000000..07135e32 --- /dev/null +++ b/x/poolrebalancer/genesis.go @@ -0,0 +1,41 @@ +package poolrebalancer + +import ( + "fmt" + + "github.com/cosmos/evm/x/poolrebalancer/keeper" + "github.com/cosmos/evm/x/poolrebalancer/types" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// InitGenesis initializes module state from genesis. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs *types.GenesisState) { + if err := gs.Validate(); err != nil { + panic(fmt.Sprintf("failed to validate %s genesis state: %s", types.ModuleName, err)) + } + if err := k.SetParamsForGenesis(ctx, gs.Params); err != nil { + panic(fmt.Sprintf("failed to set %s params: %s", types.ModuleName, err)) + } + for _, entry := range gs.PendingRedelegations { + if err := k.SetPendingRedelegation(ctx, entry); err != nil { + panic(fmt.Sprintf("failed to restore pending redelegation: %s", err)) + } + } +} + +// ExportGenesis exports module state to genesis. 
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + params, err := k.GetParams(ctx) + if err != nil { + panic(fmt.Sprintf("failed to get %s params: %s", types.ModuleName, err)) + } + redelegations, err := k.GetAllPendingRedelegations(ctx) + if err != nil { + panic(fmt.Sprintf("failed to export pending redelegations: %s", err)) + } + return &types.GenesisState{ + Params: params, + PendingRedelegations: redelegations, + } +} diff --git a/x/poolrebalancer/genesis_test.go b/x/poolrebalancer/genesis_test.go new file mode 100644 index 00000000..9aca2891 --- /dev/null +++ b/x/poolrebalancer/genesis_test.go @@ -0,0 +1,84 @@ +package poolrebalancer + +import ( + "bytes" + "context" + "math/big" + "testing" + "time" + + "cosmossdk.io/math" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + storetypes "cosmossdk.io/store/types" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + evmtypes "github.com/cosmos/evm/x/vm/types" + + "github.com/cosmos/evm/x/poolrebalancer/keeper" + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +type genesisMockStakingKeeper struct{} +type genesisMockEVM struct{} + +func (genesisMockStakingKeeper) GetBondedValidatorsByPower(context.Context) ([]stakingtypes.Validator, error) { + return nil, nil +} +func (genesisMockStakingKeeper) GetDelegatorDelegations(context.Context, sdk.AccAddress, uint16) ([]stakingtypes.Delegation, error) { + return nil, nil +} +func (genesisMockStakingKeeper) GetValidator(context.Context, sdk.ValAddress) (stakingtypes.Validator, error) { + return stakingtypes.Validator{}, nil +} +func (genesisMockStakingKeeper) GetDelegation(context.Context, sdk.AccAddress, sdk.ValAddress) (stakingtypes.Delegation, error) { + 
return stakingtypes.Delegation{}, nil +} +func (genesisMockStakingKeeper) BeginRedelegation(context.Context, sdk.AccAddress, sdk.ValAddress, sdk.ValAddress, math.LegacyDec) (time.Time, error) { + return time.Time{}, nil +} +func (genesisMockStakingKeeper) UnbondingTime(context.Context) (time.Duration, error) { return time.Hour, nil } +func (genesisMockStakingKeeper) BondDenom(context.Context) (string, error) { return "stake", nil } +func (genesisMockStakingKeeper) DelegatorDelegations(context.Context, *stakingtypes.QueryDelegatorDelegationsRequest) (*stakingtypes.QueryDelegatorDelegationsResponse, error) { + return &stakingtypes.QueryDelegatorDelegationsResponse{}, nil +} +func (genesisMockEVM) CallEVM( + sdk.Context, abi.ABI, common.Address, common.Address, bool, *big.Int, string, ...any, +) (*evmtypes.MsgEthereumTxResponse, error) { + return &evmtypes.MsgEthereumTxResponse{}, nil +} +func (genesisMockEVM) IsContract(sdk.Context, common.Address) bool { return true } + +func TestGenesis_ExportsAndRestoresPendingRedelegationsOnly(t *testing.T) { + storeKey := storetypes.NewKVStoreKey(types.ModuleName) + tKey := storetypes.NewTransientStoreKey("transient_test") + ctx := testutil.DefaultContext(storeKey, tKey).WithBlockTime(time.Unix(2_000, 0)) + storeService := runtime.NewKVStoreService(storeKey) + cdc := moduletestutil.MakeTestEncodingConfig().Codec + authority := sdk.AccAddress(bytes.Repeat([]byte{9}, 20)) + sk := genesisMockStakingKeeper{} + k := keeper.NewKeeper(cdc, storeService, tKey, sk, sk, nil, authority, genesisMockEVM{}, nil) + + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + srcVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + params := types.DefaultParams() + params.PoolDelegatorAddress = del.String() + require.NoError(t, k.SetParamsForGenesis(ctx, params)) + require.NoError(t, k.SetPendingRedelegation(ctx, types.PendingRedelegation{ + DelegatorAddress: del.String(), + SrcValidatorAddress: 
srcVal.String(), + DstValidatorAddress: dstVal.String(), + Amount: sdk.NewCoin("stake", math.NewInt(10)), + CompletionTime: ctx.BlockTime().Add(time.Hour), + })) + + exported := ExportGenesis(ctx, k) + require.Len(t, exported.PendingRedelegations, 1) + require.Equal(t, del.String(), exported.PendingRedelegations[0].DelegatorAddress) +} diff --git a/x/poolrebalancer/keeper/community_pool.go b/x/poolrebalancer/keeper/community_pool.go new file mode 100644 index 00000000..c056f8ae --- /dev/null +++ b/x/poolrebalancer/keeper/community_pool.go @@ -0,0 +1,72 @@ +package keeper + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + evmtypes "github.com/cosmos/evm/x/vm/types" + + "github.com/cosmos/evm/x/poolrebalancer/types" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// ensurePoolRebalancerModuleEVMAccount materializes the module account used as tx sender for CallEVM. +func (k Keeper) ensurePoolRebalancerModuleEVMAccount(ctx sdk.Context) { + if k.accountKeeper == nil { + return + } + moduleAcc := sdk.AccAddress(types.ModuleEVMAddress.Bytes()) + if k.accountKeeper.GetAccount(ctx, moduleAcc) == nil { + k.accountKeeper.SetAccount(ctx, k.accountKeeper.NewAccountWithAddress(ctx, moduleAcc)) + } +} + +// callCommunityPoolEVMWithCommit calls CommunityPool; commit=false is a static call. +func (k Keeper) callCommunityPoolEVMWithCommit(ctx sdk.Context, poolDel sdk.AccAddress, commit bool, method string, args ...any) (*evmtypes.MsgEthereumTxResponse, error) { + if k.evmKeeper == nil { + return nil, errors.New("evm keeper is nil") + } + k.ensurePoolRebalancerModuleEVMAccount(ctx) + poolContract := common.BytesToAddress(poolDel.Bytes()) + return k.evmKeeper.CallEVM(ctx, types.CommunityPoolABI, types.ModuleEVMAddress, poolContract, commit, nil, method, args...) +} + +// callCommunityPoolEVM state-changing CommunityPool call. 
+func (k Keeper) callCommunityPoolEVM(ctx sdk.Context, poolDel sdk.AccAddress, method string, args ...any) (*evmtypes.MsgEthereumTxResponse, error) {
+	return k.callCommunityPoolEVMWithCommit(ctx, poolDel, true, method, args...)
+}
+
+// MaybeRunCommunityPoolAutomation runs harvest then stake on PoolDelegatorAddress (best-effort; errors logged).
+// It is a no-op when no pool delegator is configured, the EVM keeper is absent, or the
+// contract reports zero totalUnits (nothing to harvest/stake).
+func (k Keeper) MaybeRunCommunityPoolAutomation(ctx sdk.Context) error {
+	del, err := k.GetPoolDelegatorAddress(ctx)
+	if err != nil {
+		return err
+	}
+	if del.Empty() || k.evmKeeper == nil {
+		return nil
+	}
+
+	totalUnits, err := k.callCommunityPoolViewUint256(ctx, del, "totalUnits")
+	if err != nil {
+		return fmt.Errorf("read community pool totalUnits: %w", err)
+	}
+	if !totalUnits.IsPositive() {
+		return nil
+	}
+
+	// Each call is independent: a failed harvest does not block the stake attempt.
+	for _, method := range []string{"harvest", "stake"} {
+		res, callErr := k.callCommunityPoolEVM(ctx, del, method)
+		if callErr != nil {
+			ctx.Logger().Error("poolrebalancer: community pool automation call failed", "method", method, "contract", common.BytesToAddress(del.Bytes()).Hex(), "err", callErr)
+			continue
+		}
+		if res != nil && res.Failed() {
+			ctx.Logger().Error("poolrebalancer: community pool automation vm failed", "method", method, "contract", common.BytesToAddress(del.Bytes()).Hex(), "vm_error", res.VmError)
+		}
+	}
+
+	return nil
+}
diff --git a/x/poolrebalancer/keeper/community_pool_reconcile.go b/x/poolrebalancer/keeper/community_pool_reconcile.go
new file mode 100644
index 00000000..fc40c96d
--- /dev/null
+++ b/x/poolrebalancer/keeper/community_pool_reconcile.go
@@ -0,0 +1,40 @@
+package keeper
+
+import (
+	"context"
+	"fmt"
+
+	"cosmossdk.io/math"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+// ComputeExpectedBondedPrincipal sums bond tokens for del across validators in Bonded status
+// (TokensFromSharesTruncated). Target for CommunityPool totalStaked.
+// Non-bonded validators are skipped entirely; truncation means the result never overstates stake.
+func (k Keeper) ComputeExpectedBondedPrincipal(ctx context.Context, del sdk.AccAddress) (math.Int, error) {
+	delegations, err := k.getAllDelegatorDelegations(ctx, del)
+	if err != nil {
+		return math.ZeroInt(), fmt.Errorf("get delegator delegations: %w", err)
+	}
+	total := math.ZeroInt()
+	for _, d := range delegations {
+		valAddr, err := sdk.ValAddressFromBech32(d.ValidatorAddress)
+		if err != nil {
+			return math.ZeroInt(), err
+		}
+		val, err := k.stakingKeeper.GetValidator(ctx, valAddr)
+		if err != nil {
+			return math.ZeroInt(), fmt.Errorf("get validator %s: %w", d.ValidatorAddress, err)
+		}
+		if val.Status != stakingtypes.Bonded {
+			continue
+		}
+		tokensDec := val.TokensFromSharesTruncated(d.Shares)
+		tokensInt := tokensDec.TruncateInt()
+		if tokensInt.IsPositive() {
+			total = total.Add(tokensInt)
+		}
+	}
+	return total, nil
+}
diff --git a/x/poolrebalancer/keeper/community_pool_reconcile_abci.go b/x/poolrebalancer/keeper/community_pool_reconcile_abci.go
new file mode 100644
index 00000000..8f9f5232
--- /dev/null
+++ b/x/poolrebalancer/keeper/community_pool_reconcile_abci.go
@@ -0,0 +1,182 @@
+package keeper
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/big"
+
+	"cosmossdk.io/math"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	"github.com/ethereum/go-ethereum/accounts/abi"
+
+	"github.com/cosmos/evm/x/poolrebalancer/types"
+)
+
+const (
+	// communityPoolReconcileSweepIntervalBlocks runs a reconcile even when dirty is false (slash drift catch-up).
+	communityPoolReconcileSweepIntervalBlocks int64 = 20
+)
+
+// getCommunityPoolReconcileDirty reads the persisted dirty flag.
+// NOTE(review): a store read error is treated the same as "not dirty"; the periodic sweep
+// above eventually reconciles anyway — confirm this best-effort read is acceptable.
+func (k Keeper) getCommunityPoolReconcileDirty(ctx context.Context) bool {
+	store := k.storeService.OpenKVStore(ctx)
+	bz, err := store.Get(types.CommunityPoolReconcileDirtyKey)
+	if err != nil || len(bz) == 0 {
+		return false
+	}
+	return bz[0] != 0
+}
+
+// setCommunityPoolReconcileDirty persists the dirty flag as a single 0/1 byte.
+func (k Keeper) setCommunityPoolReconcileDirty(ctx context.Context, v bool) error {
+	store := k.storeService.OpenKVStore(ctx)
+	if v {
+		return store.Set(types.CommunityPoolReconcileDirtyKey, []byte{1})
+	}
+	return store.Set(types.CommunityPoolReconcileDirtyKey, []byte{0})
+}
+
+// markCommunityPoolReconcileDirtyIfPoolDelegator sets the reconcile dirty flag when del is the configured pool delegator.
+func (k Keeper) markCommunityPoolReconcileDirtyIfPoolDelegator(ctx context.Context, del sdk.AccAddress) error {
+	poolDel, err := k.GetPoolDelegatorAddress(ctx)
+	if err != nil {
+		return err
+	}
+	if poolDel.Empty() || !del.Equals(poolDel) {
+		return nil
+	}
+	return k.setCommunityPoolReconcileDirty(ctx, true)
+}
+
+// unpackCommunityPoolUint256View decodes a single-uint256 view return into math.Int,
+// rejecting empty return data, negative values, and anything above MaxUint256.
+func (k Keeper) unpackCommunityPoolUint256View(methodName string, ret []byte) (math.Int, error) {
+	method, ok := types.CommunityPoolABI.Methods[methodName]
+	if !ok {
+		return math.ZeroInt(), fmt.Errorf("abi method %q not found", methodName)
+	}
+	if len(ret) == 0 {
+		return math.ZeroInt(), errors.New("empty evm return data")
+	}
+	out, err := method.Outputs.Unpack(ret)
+	if err != nil {
+		return math.ZeroInt(), err
+	}
+	if len(out) != 1 {
+		return math.ZeroInt(), fmt.Errorf("expected 1 output, got %d", len(out))
+	}
+	bi, ok := out[0].(*big.Int)
+	if !ok || bi == nil {
+		return math.ZeroInt(), fmt.Errorf("expected *big.Int, got %T", out[0])
+	}
+	if bi.Sign() < 0 {
+		return math.ZeroInt(), errors.New("negative uint256")
+	}
+	if bi.Cmp(abi.MaxUint256) > 0 {
+		return math.ZeroInt(), errors.New("uint256 view exceeds max uint256")
+	}
+	return math.NewIntFromBigInt(bi), nil
+}
+
+// coerceEVMUint256BigInt returns bi if it fits a Solidity uint256 ABI argument.
+func coerceEVMUint256BigInt(bi *big.Int) (*big.Int, error) {
+	if bi == nil {
+		return nil, errors.New("nil total-staked big.Int")
+	}
+	if bi.Sign() < 0 {
+		return nil, errors.New("total-staked amount is negative")
+	}
+	if bi.Cmp(abi.MaxUint256) > 0 {
+		return nil, fmt.Errorf("total-staked amount exceeds uint256 (bits>256)")
+	}
+	return bi, nil
+}
+
+// communityPoolStakeBucketBigInt coerces total-staked v to a *big.Int valid for Solidity uint256 ABI encoding.
+func communityPoolStakeBucketBigInt(v math.Int) (*big.Int, error) {
+	if v.IsNil() {
+		return nil, errors.New("nil total-staked amount")
+	}
+	if v.IsNegative() {
+		return nil, fmt.Errorf("total-staked amount is negative: %s", v.String())
+	}
+	return coerceEVMUint256BigInt(v.BigInt())
+}
+
+// callCommunityPoolViewUint256 performs a static (commit=false) call and decodes a uint256 result.
+func (k Keeper) callCommunityPoolViewUint256(ctx sdk.Context, poolDel sdk.AccAddress, method string) (math.Int, error) {
+	res, err := k.callCommunityPoolEVMWithCommit(ctx, poolDel, false, method)
+	if err != nil {
+		return math.ZeroInt(), err
+	}
+	if res != nil && res.Failed() {
+		return math.ZeroInt(), fmt.Errorf("vm error: %s", res.VmError)
+	}
+	if res == nil {
+		return math.ZeroInt(), errors.New("nil evm response")
+	}
+	return k.unpackCommunityPoolUint256View(method, res.Ret)
+}
+
+// tryReadOnChainCommunityPoolTotalStaked reads the contract's current totalStaked view.
+func (k Keeper) tryReadOnChainCommunityPoolTotalStaked(ctx sdk.Context, poolDel sdk.AccAddress) (math.Int, error) {
+	return k.callCommunityPoolViewUint256(ctx, poolDel, "totalStaked")
+}
+
+// MaybeReconcileCommunityPoolStakedBuckets aligns EVM totalStaked with staking when dirty or on a sweep block.
+// Returns errors for tests/metrics; EndBlocker only logs them.
+func (k Keeper) MaybeReconcileCommunityPoolStakedBuckets(ctx context.Context) error { + poolDel, err := k.GetPoolDelegatorAddress(ctx) + if err != nil { + return err + } + if poolDel.Empty() || k.evmKeeper == nil { + return nil + } + + sdkCtx := sdk.UnwrapSDKContext(ctx) + height := sdkCtx.BlockHeight() + dirty := k.getCommunityPoolReconcileDirty(ctx) + sweep := communityPoolReconcileSweepIntervalBlocks > 0 && + height%communityPoolReconcileSweepIntervalBlocks == 0 + if !dirty && !sweep { + return nil + } + + expBonded, err := k.ComputeExpectedBondedPrincipal(ctx, poolDel) + if err != nil { + return fmt.Errorf("compute expected community pool total staked: %w", err) + } + + onBonded, staticErr := k.tryReadOnChainCommunityPoolTotalStaked(sdkCtx, poolDel) + if staticErr == nil && onBonded.Equal(expBonded) { + return k.setCommunityPoolReconcileDirty(ctx, false) + } + + bondedArg, err := communityPoolStakeBucketBigInt(expBonded) + if err != nil { + _ = k.setCommunityPoolReconcileDirty(ctx, true) //nolint:errcheck // persist dirty for retry after fix + return fmt.Errorf("expected bonded principal for EVM: %w", err) + } + res, err := k.callCommunityPoolEVMWithCommit( + sdkCtx, + poolDel, + true, + "reconcileTotalStaked", + bondedArg, + ) + if err != nil { + _ = k.setCommunityPoolReconcileDirty(ctx, true) //nolint:errcheck // best-effort persist dirty for retry + return fmt.Errorf("reconcileTotalStaked: %w", err) + } + if res != nil && res.Failed() { + _ = k.setCommunityPoolReconcileDirty(ctx, true) //nolint:errcheck + return fmt.Errorf("reconcileTotalStaked vm error: %s", res.VmError) + } + return k.setCommunityPoolReconcileDirty(ctx, false) +} + +// MaybeReconcileCommunityPoolStakedBucketsSecondPass runs reconcile again after ProcessRebalance if enabled (tests). 
+func (k Keeper) MaybeReconcileCommunityPoolStakedBucketsSecondPass(ctx context.Context) error { + if k.communityPoolReconcileSecondPass == nil || !k.communityPoolReconcileSecondPass.Load() { + return nil + } + return k.MaybeReconcileCommunityPoolStakedBuckets(ctx) +} diff --git a/x/poolrebalancer/keeper/community_pool_reconcile_abci_test.go b/x/poolrebalancer/keeper/community_pool_reconcile_abci_test.go new file mode 100644 index 00000000..b9fc6210 --- /dev/null +++ b/x/poolrebalancer/keeper/community_pool_reconcile_abci_test.go @@ -0,0 +1,73 @@ +package keeper + +import ( + "bytes" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +func packCommunityPoolUint256View(t *testing.T, method string, v *big.Int) []byte { + t.Helper() + m, ok := types.CommunityPoolABI.Methods[method] + require.True(t, ok, "abi method %q", method) + bz, err := m.Outputs.Pack(v) + require.NoError(t, err) + return bz +} + +func TestMaybeReconcileCommunityPoolStakedBuckets_UsesReconcileTotalStaked(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + poolDel := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + setPoolDelegatorForTest(t, ctx, &k, poolDel) + + val := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + mockSK := k.stakingKeeper.(*mockStakingKeeper) + mockSK.validatorByAddr = map[string]stakingtypes.Validator{ + val.String(): { + OperatorAddress: val.String(), + Tokens: math.NewInt(1000), + DelegatorShares: math.LegacyNewDec(1000), + Status: stakingtypes.Bonded, + }, + } + mockSK.delegations = []stakingtypes.Delegation{ + {DelegatorAddress: poolDel.String(), ValidatorAddress: val.String(), Shares: math.LegacyNewDec(12)}, + } + + mockEVM := &mockEVMKeeper{ + ViewRetEncoder: func(method string) ([]byte, error) { + return packCommunityPoolUint256View(t, method, big.NewInt(1)), nil + }, + } + 
k.evmKeeper = mockEVM + require.NoError(t, k.setCommunityPoolReconcileDirty(ctx, true)) + + require.NoError(t, k.MaybeReconcileCommunityPoolStakedBuckets(ctx)) + require.Contains(t, mockEVM.methods, "reconcileTotalStaked") +} + +func TestMaybeReconcileCommunityPoolStakedBuckets_StaticReadClearsDirty(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + poolDel := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + setPoolDelegatorForTest(t, ctx, &k, poolDel) + + mockEVM := &mockEVMKeeper{ + ViewRetEncoder: func(method string) ([]byte, error) { + return packCommunityPoolUint256View(t, method, big.NewInt(0)), nil + }, + } + k.evmKeeper = mockEVM + require.NoError(t, k.setCommunityPoolReconcileDirty(ctx, true)) + + require.NoError(t, k.MaybeReconcileCommunityPoolStakedBuckets(ctx)) + require.False(t, k.getCommunityPoolReconcileDirty(ctx)) + require.Equal(t, []string{"totalStaked"}, mockEVM.methods) +} diff --git a/x/poolrebalancer/keeper/community_pool_reconcile_second_pass_test.go b/x/poolrebalancer/keeper/community_pool_reconcile_second_pass_test.go new file mode 100644 index 00000000..afdb565b --- /dev/null +++ b/x/poolrebalancer/keeper/community_pool_reconcile_second_pass_test.go @@ -0,0 +1,55 @@ +package keeper + +import ( + "bytes" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +func TestMaybeReconcileCommunityPoolStakedBucketsSecondPass_NoOpWhenDisabled(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + k.SetCommunityPoolReconcileSecondPassForTesting(false) + t.Cleanup(func() { k.SetCommunityPoolReconcileSecondPassForTesting(false) }) + mockEVM := &mockEVMKeeper{} + k.evmKeeper = mockEVM + + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + p := types.DefaultParams() + p.PoolDelegatorAddress = del.String() + require.NoError(t, k.SetParams(ctx, p)) + + ctx = ctx.WithBlockHeight(19) + require.NoError(t, k.setCommunityPoolReconcileDirty(ctx, true)) + + 
require.NoError(t, k.MaybeReconcileCommunityPoolStakedBucketsSecondPass(ctx)) + require.Empty(t, mockEVM.methods) +} + +func TestMaybeReconcileCommunityPoolStakedBucketsSecondPass_DelegatesWhenEnabled(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + k.SetCommunityPoolReconcileSecondPassForTesting(true) + t.Cleanup(func() { k.SetCommunityPoolReconcileSecondPassForTesting(false) }) + mockEVM := &mockEVMKeeper{ + ViewRetEncoder: func(method string) ([]byte, error) { + return packCommunityPoolUint256View(t, method, big.NewInt(1)), nil + }, + } + k.evmKeeper = mockEVM + + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + p := types.DefaultParams() + p.PoolDelegatorAddress = del.String() + require.NoError(t, k.SetParams(ctx, p)) + + ctx = ctx.WithBlockHeight(19) + require.NoError(t, k.setCommunityPoolReconcileDirty(ctx, true)) + + require.NoError(t, k.MaybeReconcileCommunityPoolStakedBucketsSecondPass(ctx)) + require.Equal(t, []string{"totalStaked", "reconcileTotalStaked"}, mockEVM.methods) +} diff --git a/x/poolrebalancer/keeper/community_pool_reconcile_test.go b/x/poolrebalancer/keeper/community_pool_reconcile_test.go new file mode 100644 index 00000000..0694c597 --- /dev/null +++ b/x/poolrebalancer/keeper/community_pool_reconcile_test.go @@ -0,0 +1,65 @@ +package keeper + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +func TestComputeExpectedBondedPrincipal_SkipsNonBondedValidators(t *testing.T) { + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + bondedVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + unbondingVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + + sk := &mockStakingKeeper{ + validatorByAddr: map[string]stakingtypes.Validator{ + bondedVal.String(): { + OperatorAddress: bondedVal.String(), + Tokens: math.NewInt(1000), + DelegatorShares: math.LegacyNewDec(1000), + Status: 
stakingtypes.Bonded, + }, + unbondingVal.String(): { + OperatorAddress: unbondingVal.String(), + Tokens: math.NewInt(500), + DelegatorShares: math.LegacyNewDec(500), + Status: stakingtypes.Unbonding, + }, + }, + delegations: []stakingtypes.Delegation{ + {DelegatorAddress: del.String(), ValidatorAddress: bondedVal.String(), Shares: math.LegacyNewDec(100)}, + {DelegatorAddress: del.String(), ValidatorAddress: unbondingVal.String(), Shares: math.LegacyNewDec(50)}, + }, + } + ctx, k := newProcessRebalanceKeeper(t, sk) + sum, err := k.ComputeExpectedBondedPrincipal(ctx, del) + require.NoError(t, err) + require.Equal(t, "100", sum.String()) +} + +func TestGetAllDelegatorDelegations_PaginatesAcrossPages(t *testing.T) { + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + baseVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + delegations := make([]stakingtypes.Delegation, 0, delegatorDelegationPageLimit+1) + for i := uint64(0); i <= delegatorDelegationPageLimit; i++ { + valBytes := append([]byte{}, baseVal.Bytes()...) 
+ valBytes[len(valBytes)-1] = byte(i % 255) + val := sdk.ValAddress(valBytes) + delegations = append(delegations, stakingtypes.Delegation{ + DelegatorAddress: del.String(), + ValidatorAddress: val.String(), + Shares: math.LegacyNewDec(1), + }) + } + sk := &mockStakingKeeper{delegations: delegations} + ctx, k := newProcessRebalanceKeeper(t, sk) + + got, err := k.getAllDelegatorDelegations(ctx, del) + require.NoError(t, err) + require.Len(t, got, int(delegatorDelegationPageLimit+1)) +} diff --git a/x/poolrebalancer/keeper/community_pool_test.go b/x/poolrebalancer/keeper/community_pool_test.go new file mode 100644 index 00000000..6da81ebb --- /dev/null +++ b/x/poolrebalancer/keeper/community_pool_test.go @@ -0,0 +1,165 @@ +package keeper + +import ( + "bytes" + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + pooltypes "github.com/cosmos/evm/x/poolrebalancer/types" + evmtypes "github.com/cosmos/evm/x/vm/types" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type mockEVMKeeper struct { + methods []string + commits []bool + froms []common.Address + contracts []common.Address + args [][]any + + errByMethod map[string]error + failedVM map[string]string // method -> VmError (non-empty => Failed()) + + // ViewRetEncoder, when set, supplies Ret for commit=false eth_call-style invocations (e.g. uint256 getters). + ViewRetEncoder func(method string) ([]byte, error) + + // isContractFn optionally gates IsContract; nil means all addresses are treated as contracts. 
+ isContractFn func(common.Address) bool +} + +func (m *mockEVMKeeper) IsContract(_ sdk.Context, addr common.Address) bool { + if m != nil && m.isContractFn != nil { + return m.isContractFn(addr) + } + return true +} + +func (m *mockEVMKeeper) CallEVM( + ctx sdk.Context, + abi abi.ABI, + from, contract common.Address, + commit bool, + gasCap *big.Int, + method string, + args ...any, +) (*evmtypes.MsgEthereumTxResponse, error) { + m.methods = append(m.methods, method) + m.commits = append(m.commits, commit) + m.froms = append(m.froms, from) + m.contracts = append(m.contracts, contract) + m.args = append(m.args, append([]any(nil), args...)) + if err, ok := m.errByMethod[method]; ok { + return nil, err + } + vmErr := "" + if m.failedVM != nil { + vmErr = m.failedVM[method] + } + var ret []byte + if !commit && m.ViewRetEncoder != nil { + enc, err := m.ViewRetEncoder(method) + if err != nil { + return nil, err + } + ret = enc + } + return &evmtypes.MsgEthereumTxResponse{VmError: vmErr, Ret: ret}, nil +} + +func TestMaybeRunCommunityPoolAutomation_SkipsWhenPoolDelegatorUnset(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + mockEVM := &mockEVMKeeper{} + k.evmKeeper = mockEVM + + require.NoError(t, k.MaybeRunCommunityPoolAutomation(ctx)) + require.Empty(t, mockEVM.methods) +} + +func TestMaybeRunCommunityPoolAutomation_SkipsWhenEVMKeeperUnset(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + mockEVM := &mockEVMKeeper{} + k.evmKeeper = mockEVM + + del := sdk.AccAddress(bytes.Repeat([]byte{7}, 20)) + params := pooltypes.DefaultParams() + params.PoolDelegatorAddress = del.String() + require.NoError(t, k.SetParams(ctx, params)) + + k.evmKeeper = nil + require.NoError(t, k.MaybeRunCommunityPoolAutomation(ctx)) + require.Empty(t, mockEVM.methods) +} + +func TestMaybeRunCommunityPoolAutomation_CallsHarvestThenStake(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + mockEVM := &mockEVMKeeper{ + ViewRetEncoder: func(method string) ([]byte, error) { + return 
packCommunityPoolUint256View(t, method, big.NewInt(1)), nil + }, + } + k.evmKeeper = mockEVM + + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + params := pooltypes.DefaultParams() + params.PoolDelegatorAddress = del.String() + require.NoError(t, k.SetParams(ctx, params)) + + require.NoError(t, k.MaybeRunCommunityPoolAutomation(ctx)) + require.Equal(t, []string{"totalUnits", "harvest", "stake"}, mockEVM.methods) + + expectedContract := common.BytesToAddress(del.Bytes()) + require.Len(t, mockEVM.froms, 3) + require.Len(t, mockEVM.contracts, 3) + require.Equal(t, pooltypes.ModuleEVMAddress, mockEVM.froms[0]) + require.Equal(t, pooltypes.ModuleEVMAddress, mockEVM.froms[1]) + require.Equal(t, pooltypes.ModuleEVMAddress, mockEVM.froms[2]) + require.Equal(t, expectedContract, mockEVM.contracts[0]) + require.Equal(t, expectedContract, mockEVM.contracts[1]) + require.Equal(t, expectedContract, mockEVM.contracts[2]) +} + +func TestMaybeRunCommunityPoolAutomation_HarvestFailureDoesNotBlockStake(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + mockEVM := &mockEVMKeeper{ + ViewRetEncoder: func(method string) ([]byte, error) { + return packCommunityPoolUint256View(t, method, big.NewInt(1)), nil + }, + errByMethod: map[string]error{ + "harvest": errors.New("mock harvest failure"), + }, + } + k.evmKeeper = mockEVM + + del := sdk.AccAddress(bytes.Repeat([]byte{2}, 20)) + params := pooltypes.DefaultParams() + params.PoolDelegatorAddress = del.String() + require.NoError(t, k.SetParams(ctx, params)) + + require.NoError(t, k.MaybeRunCommunityPoolAutomation(ctx)) + require.Equal(t, []string{"totalUnits", "harvest", "stake"}, mockEVM.methods) +} + +func TestMaybeRunCommunityPoolAutomation_SkipsWhenPoolHasZeroUnits(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + mockEVM := &mockEVMKeeper{ + ViewRetEncoder: func(method string) ([]byte, error) { + return packCommunityPoolUint256View(t, method, big.NewInt(0)), nil + }, + } + k.evmKeeper = mockEVM + + del := 
sdk.AccAddress(bytes.Repeat([]byte{3}, 20)) + params := pooltypes.DefaultParams() + params.PoolDelegatorAddress = del.String() + require.NoError(t, k.SetParams(ctx, params)) + + require.NoError(t, k.MaybeRunCommunityPoolAutomation(ctx)) + require.Equal(t, []string{"totalUnits"}, mockEVM.methods) + require.Equal(t, []bool{false}, mockEVM.commits) +} diff --git a/x/poolrebalancer/keeper/delegation_scan.go b/x/poolrebalancer/keeper/delegation_scan.go new file mode 100644 index 00000000..59a42225 --- /dev/null +++ b/x/poolrebalancer/keeper/delegation_scan.go @@ -0,0 +1,46 @@ +package keeper + +import ( + "context" + "fmt" + + "github.com/cosmos/cosmos-sdk/types/query" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +const delegatorDelegationPageLimit uint64 = 200 + +func (k Keeper) getAllDelegatorDelegations(ctx context.Context, delegator sdk.AccAddress) ([]stakingtypes.Delegation, error) { + if k.stakingQuerier == nil { + return nil, fmt.Errorf("staking querier is not configured") + } + + delegatorAddr := delegator.String() + var ( + out []stakingtypes.Delegation + nextKey []byte + ) + + for { + res, err := k.stakingQuerier.DelegatorDelegations(ctx, &stakingtypes.QueryDelegatorDelegationsRequest{ + DelegatorAddr: delegatorAddr, + Pagination: &query.PageRequest{ + Key: nextKey, + Limit: delegatorDelegationPageLimit, + }, + }) + if err != nil { + return nil, fmt.Errorf("delegator delegations page query for %s: %w", delegatorAddr, err) + } + for _, dr := range res.DelegationResponses { + out = append(out, dr.Delegation) + } + if res.Pagination == nil || len(res.Pagination.NextKey) == 0 { + break + } + nextKey = res.Pagination.NextKey + } + + return out, nil +} diff --git a/x/poolrebalancer/keeper/evm_interface_test.go b/x/poolrebalancer/keeper/evm_interface_test.go new file mode 100644 index 00000000..c097808b --- /dev/null +++ b/x/poolrebalancer/keeper/evm_interface_test.go @@ -0,0 +1,9 @@ +package keeper + 
+import ( + pooltypes "github.com/cosmos/evm/x/poolrebalancer/types" + evmkeeper "github.com/cosmos/evm/x/vm/keeper" +) + +// Compile-time contract: vm keeper must satisfy poolrebalancer's minimal EVM interface. +var _ pooltypes.EVMKeeper = (*evmkeeper.Keeper)(nil) diff --git a/x/poolrebalancer/keeper/genesis.go b/x/poolrebalancer/keeper/genesis.go new file mode 100644 index 00000000..db263031 --- /dev/null +++ b/x/poolrebalancer/keeper/genesis.go @@ -0,0 +1,46 @@ +package keeper + +import ( + "context" + + "github.com/cosmos/evm/x/poolrebalancer/types" + + storetypes "cosmossdk.io/store/types" + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SetPendingRedelegation writes a pending redelegation entry to the store, including its queue and index keys. +// This is intended for genesis import/export. +func (k Keeper) SetPendingRedelegation(ctx context.Context, entry types.PendingRedelegation) error { + del, err := sdk.AccAddressFromBech32(entry.DelegatorAddress) + if err != nil { + return err + } + srcVal, err := sdk.ValAddressFromBech32(entry.SrcValidatorAddress) + if err != nil { + return err + } + dstVal, err := sdk.ValAddressFromBech32(entry.DstValidatorAddress) + if err != nil { + return err + } + return k.addPendingRedelegation(ctx, del, srcVal, dstVal, entry.Amount, entry.CompletionTime) +} + +// GetAllPendingRedelegations returns all pending redelegation entries stored under the primary key prefix. 
+func (k Keeper) GetAllPendingRedelegations(ctx context.Context) ([]types.PendingRedelegation, error) { + store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + iter := storetypes.KVStorePrefixIterator(store, types.PendingRedelegationKey) + defer iter.Close() //nolint:errcheck + + out := make([]types.PendingRedelegation, 0) + for ; iter.Valid(); iter.Next() { + var entry types.PendingRedelegation + if err := k.cdc.Unmarshal(iter.Value(), &entry); err != nil { + return nil, err + } + out = append(out, entry) + } + return out, nil +} diff --git a/x/poolrebalancer/keeper/grpc_query.go b/x/poolrebalancer/keeper/grpc_query.go new file mode 100644 index 00000000..a4c5f647 --- /dev/null +++ b/x/poolrebalancer/keeper/grpc_query.go @@ -0,0 +1,61 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/store/prefix" + + "github.com/cosmos/evm/x/poolrebalancer/types" + + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ types.QueryServer = QueryServer{} + +type QueryServer struct { + k Keeper +} + +func NewQueryServer(k Keeper) QueryServer { + return QueryServer{k: k} +} + +func (qs QueryServer) Params(ctx context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + params, err := qs.k.GetParams(ctx) + if err != nil { + return nil, err + } + return &types.QueryParamsResponse{Params: params}, nil +} + +func (qs QueryServer) PendingRedelegations( + ctx context.Context, + req *types.QueryPendingRedelegationsRequest, +) (*types.QueryPendingRedelegationsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + store := runtime.KVStoreAdapter(qs.k.storeService.OpenKVStore(ctx)) + pstore := prefix.NewStore(store, types.PendingRedelegationKey) + + var out []types.PendingRedelegation + pageRes, err := query.Paginate(pstore, req.Pagination, func(key, value []byte) error { + var entry 
types.PendingRedelegation + if err := qs.k.cdc.Unmarshal(value, &entry); err != nil { + return err + } + out = append(out, entry) + return nil + }) + if err != nil { + return nil, err + } + + return &types.QueryPendingRedelegationsResponse{ + Redelegations: out, + Pagination: pageRes, + }, nil +} diff --git a/x/poolrebalancer/keeper/grpc_query_test.go b/x/poolrebalancer/keeper/grpc_query_test.go new file mode 100644 index 00000000..45ab4b9e --- /dev/null +++ b/x/poolrebalancer/keeper/grpc_query_test.go @@ -0,0 +1,65 @@ +package keeper + +import ( + "bytes" + "testing" + "time" + + "cosmossdk.io/math" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkquery "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +func TestQueryParams_RoundTrip(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + params := types.DefaultParams() + params.MaxOpsPerBlock = 7 + require.NoError(t, k.SetParams(ctx, params)) + + qs := NewQueryServer(k) + res, err := qs.Params(ctx, &types.QueryParamsRequest{}) + require.NoError(t, err) + require.Equal(t, uint32(7), res.Params.MaxOpsPerBlock) +} + +func TestQueryPendingRedelegations_DecodesProtoValues(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + ctx = ctx.WithBlockTime(time.Unix(2_000, 0)) + + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + srcVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + + entry := types.PendingRedelegation{ + DelegatorAddress: del.String(), + SrcValidatorAddress: srcVal.String(), + DstValidatorAddress: dstVal.String(), + Amount: sdk.NewCoin("stake", math.NewInt(5)), + CompletionTime: ctx.BlockTime().Add(time.Hour), + } + require.NoError(t, k.SetPendingRedelegation(ctx, entry)) + + qs := NewQueryServer(k) + res, err := qs.PendingRedelegations(ctx, &types.QueryPendingRedelegationsRequest{ + Pagination: 
&sdkquery.PageRequest{Limit: 1}, + }) + require.NoError(t, err) + require.Len(t, res.Redelegations, 1) + require.Equal(t, entry.DelegatorAddress, res.Redelegations[0].DelegatorAddress) +} + +func TestQueryPendingRedelegations_NilRequest(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + qs := NewQueryServer(k) + + _, err := qs.PendingRedelegations(ctx, nil) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) +} diff --git a/x/poolrebalancer/keeper/keeper.go b/x/poolrebalancer/keeper/keeper.go new file mode 100644 index 00000000..27eb2dde --- /dev/null +++ b/x/poolrebalancer/keeper/keeper.go @@ -0,0 +1,66 @@ +package keeper + +import ( + "sync/atomic" + "testing" + + "cosmossdk.io/core/store" + storetypes "cosmossdk.io/store/types" + "github.com/cosmos/evm/x/poolrebalancer/types" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Keeper holds state and dependencies for the pool rebalancer. +type Keeper struct { + storeService store.KVStoreService + transientKey *storetypes.TransientStoreKey + cdc codec.BinaryCodec + stakingKeeper types.StakingKeeper + stakingQuerier types.StakingQuerier + distrKeeper types.DistributionKeeper + evmKeeper types.EVMKeeper + accountKeeper types.AccountKeeper + authority sdk.AccAddress + // Second EndBlock reconcile after ProcessRebalance; default off. Shared pointer across Keeper copies. + communityPoolReconcileSecondPass *atomic.Bool +} + +// NewKeeper returns a new Keeper. 
+func NewKeeper( + cdc codec.BinaryCodec, + storeService store.KVStoreService, + transientKey *storetypes.TransientStoreKey, + stakingKeeper types.StakingKeeper, + stakingQuerier types.StakingQuerier, + distrKeeper types.DistributionKeeper, + authority sdk.AccAddress, + evmKeeper types.EVMKeeper, + accountKeeper types.AccountKeeper, +) Keeper { + if err := sdk.VerifyAddressFormat(authority); err != nil { + panic(err) + } + sp := &atomic.Bool{} + return Keeper{ + storeService: storeService, + transientKey: transientKey, + cdc: cdc, + stakingKeeper: stakingKeeper, + stakingQuerier: stakingQuerier, + distrKeeper: distrKeeper, + evmKeeper: evmKeeper, + accountKeeper: accountKeeper, + authority: authority, + communityPoolReconcileSecondPass: sp, + } +} + +// SetCommunityPoolReconcileSecondPassForTesting toggles the second reconcile path. No-op unless testing.Testing(). +func (k *Keeper) SetCommunityPoolReconcileSecondPassForTesting(v bool) { + if !testing.Testing() { + return + } + k.communityPoolReconcileSecondPass.Store(v) +} diff --git a/x/poolrebalancer/keeper/msg_server.go b/x/poolrebalancer/keeper/msg_server.go new file mode 100644 index 00000000..b8005e4c --- /dev/null +++ b/x/poolrebalancer/keeper/msg_server.go @@ -0,0 +1,39 @@ +package keeper + +import ( + "context" + + "github.com/cosmos/evm/x/poolrebalancer/types" + + errorsmod "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" +) + +// UpdateParams updates module params. Caller must be the governance module account. 
+func (k *Keeper) UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { + if req == nil { + return nil, errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "empty update params request") + } + + if k.authority.String() != req.Authority { + return nil, errorsmod.Wrapf( + govtypes.ErrInvalidSigner, + "invalid authority; expected %s, got %s", + k.authority.String(), + req.Authority, + ) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + if err := req.Params.Validate(); err != nil { + return nil, err + } + if err := k.SetParams(ctx, req.Params); err != nil { + return nil, err + } + + return &types.MsgUpdateParamsResponse{}, nil +} diff --git a/x/poolrebalancer/keeper/msg_server_test.go b/x/poolrebalancer/keeper/msg_server_test.go new file mode 100644 index 00000000..3403a71f --- /dev/null +++ b/x/poolrebalancer/keeper/msg_server_test.go @@ -0,0 +1,266 @@ +package keeper + +import ( + "bytes" + "testing" + "time" + + "cosmossdk.io/math" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/ethereum/go-ethereum/common" + + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +func TestUpdateParams_RejectsWrongAuthority(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + // Current keeper authority is 0x09..09; use a different address. 
+ wrongAuthority := sdk.AccAddress(bytes.Repeat([]byte{8}, 20)).String() + + msg := &types.MsgUpdateParams{ + Authority: wrongAuthority, + Params: types.DefaultParams(), + } + + _, err := k.UpdateParams(ctx, msg) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid authority") +} + +func TestUpdateParams_RejectsNilRequest(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + _, err := k.UpdateParams(ctx, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "empty update params request") +} + +func TestUpdateParams_AcceptsAuthorityAndUpdatesParams(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + authority := k.authority.String() + + newParams := types.DefaultParams() + newParams.MaxOpsPerBlock = 9 + newParams.MaxMovePerOp = math.NewInt(77) + + msg := &types.MsgUpdateParams{ + Authority: authority, + Params: newParams, + } + + _, err := k.UpdateParams(ctx, msg) + require.NoError(t, err) + + got, err := k.GetParams(ctx) + require.NoError(t, err) + require.Equal(t, uint32(9), got.MaxOpsPerBlock) + require.True(t, got.MaxMovePerOp.Equal(math.NewInt(77))) +} + +func TestUpdateParams_RejectsInvalidParamsWithValidAuthority(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + authority := k.authority.String() + invalid := types.DefaultParams() + invalid.MaxTargetValidators = 0 + + msg := &types.MsgUpdateParams{ + Authority: authority, + Params: invalid, + } + + _, err := k.UpdateParams(ctx, msg) + require.Error(t, err) + require.Contains(t, err.Error(), "max_target_validators must be positive") +} + +// UpdateParams calls SetParams, so pool_delegator validation applies on the gov path too. 
+func TestUpdateParams_RejectsNonEmptyPoolWhenEVMKeeperNil(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + params := types.DefaultParams() + params.PoolDelegatorAddress = sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String() + + _, err := k.UpdateParams(ctx, &types.MsgUpdateParams{ + Authority: k.authority.String(), + Params: params, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "requires evm keeper") +} + +func TestUpdateParams_RejectsUserAccountPoolDelegator(t *testing.T) { + ctx, k, mockAcc := newTestKeeper(t) + k.evmKeeper = &mockEVMKeeper{} + + priv := secp256k1.GenPrivKey() + pub := priv.PubKey() + addr := sdk.AccAddress(pub.Address()) + mockAcc.SetAccount(ctx, authtypes.NewBaseAccount(addr, pub, 0, 0)) + + params := types.DefaultParams() + params.PoolDelegatorAddress = addr.String() + + _, err := k.UpdateParams(ctx, &types.MsgUpdateParams{ + Authority: k.authority.String(), + Params: params, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "user account with signing keys") +} + +func TestUpdateParams_RejectsNonContractWithoutAuthAccountAtRuntime(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + k.evmKeeper = &mockEVMKeeper{ + isContractFn: func(common.Address) bool { return false }, + } + addr := sdk.AccAddress(bytes.Repeat([]byte{0xAB}, 20)) + params := types.DefaultParams() + params.PoolDelegatorAddress = addr.String() + + _, err := k.UpdateParams(ctx, &types.MsgUpdateParams{ + Authority: k.authority.String(), + Params: params, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "must be an EVM contract") +} + +func TestMsgUpdateParams_ValidateBasic_RejectsInvalidParams(t *testing.T) { + msg := &types.MsgUpdateParams{ + Authority: sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String(), + Params: types.Params{ + PoolDelegatorAddress: "", + MaxTargetValidators: 0, // invalid + RebalanceThresholdBp: 50, + MaxOpsPerBlock: 5, + MaxMovePerOp: math.ZeroInt(), + }, + } + + require.Error(t, msg.ValidateBasic()) +} + 
+func TestSetParams_RejectsInvalidParamsDirectly(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + invalid := types.DefaultParams() + invalid.MaxTargetValidators = 0 + + err := k.SetParams(ctx, invalid) + require.Error(t, err) + require.Contains(t, err.Error(), "max_target_validators must be positive") +} + +func TestSetParams_RejectsNonEmptyPoolWhenEVMKeeperNil(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + params := types.DefaultParams() + params.PoolDelegatorAddress = sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String() + + err := k.SetParams(ctx, params) + require.Error(t, err) + require.Contains(t, err.Error(), "requires evm keeper") +} + +func TestSetParams_RejectsNonEmptyPoolWhenAuthAndEVMUnset(t *testing.T) { + ctx, k := newTestKeeperNilAuthAndEVM(t) + params := types.DefaultParams() + params.PoolDelegatorAddress = sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String() + + err := k.SetParams(ctx, params) + require.Error(t, err) + require.Contains(t, err.Error(), "requires account keeper or evm keeper for validation") +} + +// User account with pubkey (signing keys); same intent as plan's RejectsUserAccountWithPubkey. 
+func TestSetParams_RejectsUserAccountPoolDelegator(t *testing.T) { + ctx, k, mockAcc := newTestKeeper(t) + k.evmKeeper = &mockEVMKeeper{} + + priv := secp256k1.GenPrivKey() + pub := priv.PubKey() + addr := sdk.AccAddress(pub.Address()) + acc := authtypes.NewBaseAccount(addr, pub, 0, 0) + mockAcc.SetAccount(ctx, acc) + + params := types.DefaultParams() + params.PoolDelegatorAddress = addr.String() + + err := k.SetParams(ctx, params) + require.Error(t, err) + require.Contains(t, err.Error(), "user account with signing keys") +} + +func TestSetParams_RejectsNonContractWithoutAuthAccountAtRuntime(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + k.evmKeeper = &mockEVMKeeper{ + isContractFn: func(common.Address) bool { return false }, + } + addr := sdk.AccAddress(bytes.Repeat([]byte{0xAB}, 20)) + params := types.DefaultParams() + params.PoolDelegatorAddress = addr.String() + + err := k.SetParams(ctx, params) + require.Error(t, err) + require.Contains(t, err.Error(), "must be an EVM contract") +} + +func TestSetParamsForGenesis_AcceptsBootstrapNoAuthAccount(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + k.evmKeeper = &mockEVMKeeper{ + isContractFn: func(common.Address) bool { return false }, + } + addr := sdk.AccAddress(bytes.Repeat([]byte{0xAB}, 20)) + params := types.DefaultParams() + params.PoolDelegatorAddress = addr.String() + + require.NoError(t, k.SetParamsForGenesis(ctx, params)) +} + +func TestSetParams_RejectsChangingPoolDelegatorWhenTrackedRedelegationsExist(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + k.evmKeeper = &mockEVMKeeper{} + + currentPool := sdk.AccAddress(bytes.Repeat([]byte{0xAB}, 20)) + nextPool := sdk.AccAddress(bytes.Repeat([]byte{0xBC}, 20)) + params := types.DefaultParams() + params.PoolDelegatorAddress = currentPool.String() + require.NoError(t, k.SetParams(ctx, params)) + + srcVal := sdk.ValAddress(bytes.Repeat([]byte{0xCD}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{0xDE}, 20)) + require.NoError(t, 
k.SetPendingRedelegation(ctx, types.PendingRedelegation{ + DelegatorAddress: currentPool.String(), + SrcValidatorAddress: srcVal.String(), + DstValidatorAddress: dstVal.String(), + Amount: sdk.NewCoin("stake", math.NewInt(10)), + CompletionTime: sdk.UnwrapSDKContext(ctx).BlockTime().Add(time.Hour), + })) + + params.PoolDelegatorAddress = nextPool.String() + err := k.SetParams(ctx, params) + require.Error(t, err) + require.Contains(t, err.Error(), "pending redelegations exist for current pool delegator") +} + +func TestSetParams_RejectsNonContractWhenAccountExistsWithoutBootstrap(t *testing.T) { + ctx, k, mockAcc := newTestKeeper(t) + k.evmKeeper = &mockEVMKeeper{ + isContractFn: func(common.Address) bool { return false }, + } + addr := sdk.AccAddress(bytes.Repeat([]byte{0xAB}, 20)) + mockAcc.SetAccount(ctx, authtypes.NewBaseAccountWithAddress(addr)) + + params := types.DefaultParams() + params.PoolDelegatorAddress = addr.String() + + err := k.SetParams(ctx, params) + require.Error(t, err) + require.Contains(t, err.Error(), "must be an EVM contract") +} diff --git a/x/poolrebalancer/keeper/params.go b/x/poolrebalancer/keeper/params.go new file mode 100644 index 00000000..2c6693fb --- /dev/null +++ b/x/poolrebalancer/keeper/params.go @@ -0,0 +1,169 @@ +// Package keeper implements the poolrebalancer module keeper. +// +// params.go contains params get/set helpers and typed accessors. +package keeper + +import ( + "context" + "fmt" + + "github.com/cosmos/evm/x/poolrebalancer/types" + + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetParams returns the current module params. 
+func (k Keeper) GetParams(ctx context.Context) (params types.Params, err error) { + store := k.storeService.OpenKVStore(ctx) + bz, err := store.Get(types.ParamsKey) + if err != nil { + return params, err + } + if bz == nil || len(bz) == 0 { + return types.DefaultParams(), nil + } + if err := k.cdc.Unmarshal(bz, ¶ms); err != nil { + return params, err + } + return params, nil +} + +func (k Keeper) setParams(ctx context.Context, params types.Params, allowBootstrap bool) error { + if err := params.Validate(); err != nil { + return err + } + currentParams, err := k.GetParams(ctx) + if err != nil { + return err + } + if err := k.validatePoolDelegatorAddressChange(ctx, currentParams.PoolDelegatorAddress, params.PoolDelegatorAddress); err != nil { + return err + } + if err := k.validatePoolDelegatorAddress(ctx, params.PoolDelegatorAddress, allowBootstrap); err != nil { + return err + } + store := k.storeService.OpenKVStore(ctx) + bz := k.cdc.MustMarshal(¶ms) + return store.Set(types.ParamsKey, bz) +} + +// SetParams stores module params for runtime operations after validating pool +// delegator safety in strict contract-only mode. +// +// It rejects pool_delegator_address changes that would orphan tracked pending +// rebalance operations and disallows non-contract bootstrap exceptions. +func (k Keeper) SetParams(ctx context.Context, params types.Params) error { + return k.setParams(ctx, params, false) +} + +// SetParamsForGenesis stores module params during module initialization. +// +// This keeps bootstrap compatibility for pool_delegator_address values that do +// not yet have an auth account record. 
+func (k Keeper) SetParamsForGenesis(ctx context.Context, params types.Params) error { + return k.setParams(ctx, params, true) +} + +func (k Keeper) hasPendingRedelegationsForDelegator(ctx context.Context, del sdk.AccAddress) (bool, error) { + if del.Empty() { + return false, nil + } + entries, err := k.GetAllPendingRedelegations(ctx) + if err != nil { + return false, err + } + for _, entry := range entries { + if entry.DelegatorAddress == del.String() { + return true, nil + } + } + return false, nil +} + +// validatePoolDelegatorAddressChange prevents changing/clearing +// pool_delegator_address while pool-tracked pending state exists. +func (k Keeper) validatePoolDelegatorAddressChange(ctx context.Context, current, next string) error { + if current == next || current == "" { + return nil + } + + currentDel, err := sdk.AccAddressFromBech32(current) + if err != nil { + return fmt.Errorf("invalid current pool_delegator_address: %w", err) + } + hasRedelegations, err := k.hasPendingRedelegationsForDelegator(ctx, currentDel) + if err != nil { + return err + } + if hasRedelegations { + return fmt.Errorf("cannot change pool_delegator_address while pending redelegations exist for current pool delegator") + } + + if next == "" { + return nil + } + nextDel, err := sdk.AccAddressFromBech32(next) + if err != nil { + return fmt.Errorf("invalid next pool_delegator_address: %w", err) + } + hasRedelegations, err = k.hasPendingRedelegationsForDelegator(ctx, nextDel) + if err != nil { + return err + } + if hasRedelegations { + return fmt.Errorf("cannot change pool_delegator_address while pending redelegations exist for next pool delegator") + } + return nil +} + +// GetPoolDelegatorAddress returns the configured pool delegator address (empty if not set). 
+func (k Keeper) GetPoolDelegatorAddress(ctx context.Context) (sdk.AccAddress, error) { + params, err := k.GetParams(ctx) + if err != nil { + return nil, err + } + if params.PoolDelegatorAddress == "" { + return sdk.AccAddress{}, nil + } + return sdk.AccAddressFromBech32(params.PoolDelegatorAddress) +} + +// GetMaxTargetValidators returns MaxTargetValidators from params. +func (k Keeper) GetMaxTargetValidators(ctx context.Context) (uint32, error) { + params, err := k.GetParams(ctx) + if err != nil { + return 0, err + } + return params.MaxTargetValidators, nil +} + +// GetRebalanceThresholdBP returns RebalanceThresholdBP from params. +func (k Keeper) GetRebalanceThresholdBP(ctx context.Context) (uint32, error) { + params, err := k.GetParams(ctx) + if err != nil { + return 0, err + } + return params.RebalanceThresholdBp, nil +} + +// GetMaxOpsPerBlock returns MaxOpsPerBlock from params. +func (k Keeper) GetMaxOpsPerBlock(ctx context.Context) (uint32, error) { + params, err := k.GetParams(ctx) + if err != nil { + return 0, err + } + return params.MaxOpsPerBlock, nil +} + +// GetMaxMovePerOp returns MaxMovePerOp from params (as math.Int; zero means no cap). +func (k Keeper) GetMaxMovePerOp(ctx context.Context) (math.Int, error) { + params, err := k.GetParams(ctx) + if err != nil { + return math.ZeroInt(), err + } + if params.MaxMovePerOp.IsNil() { + return math.ZeroInt(), nil + } + return params.MaxMovePerOp, nil +} diff --git a/x/poolrebalancer/keeper/pool_delegator.go b/x/poolrebalancer/keeper/pool_delegator.go new file mode 100644 index 00000000..dd7231b9 --- /dev/null +++ b/x/poolrebalancer/keeper/pool_delegator.go @@ -0,0 +1,55 @@ +package keeper + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// validatePoolDelegatorAddress enforces pool_delegator_address safety. 
+// +// Contract-only policy: a non-empty address must be validated with IsContract on the EVM keeper, +// except bootstrap when allowBootstrap is true and auth has no account yet. +// User accounts with signing keys are rejected. +// There is no module-account shortcut. Non-empty pool address requires a non-nil evm keeper. +func (k Keeper) validatePoolDelegatorAddress(ctx context.Context, poolDelStr string, allowBootstrap bool) error { + if poolDelStr == "" { + return nil + } + poolDel, err := sdk.AccAddressFromBech32(poolDelStr) + if err != nil { + return fmt.Errorf("invalid pool_delegator_address: %w", err) + } + + if k.accountKeeper == nil && k.evmKeeper == nil { + return fmt.Errorf("pool_delegator_address requires account keeper or evm keeper for validation") + } + + sdkCtx := sdk.UnwrapSDKContext(ctx) + + var acc sdk.AccountI + if k.accountKeeper != nil { + acc = k.accountKeeper.GetAccount(ctx, poolDel) + if acc != nil { + if pk := acc.GetPubKey(); pk != nil && len(pk.Bytes()) > 0 { + return fmt.Errorf("pool_delegator_address cannot be a user account with signing keys") + } + } + } + + if k.evmKeeper == nil { + return fmt.Errorf("pool_delegator_address requires evm keeper when set") + } + + if k.evmKeeper.IsContract(sdkCtx, common.BytesToAddress(poolDel.Bytes())) { + return nil + } + if allowBootstrap && k.accountKeeper != nil && acc == nil { + // Bootstrap: params may be set before the contract account exists in auth. 
+ return nil + } + return fmt.Errorf("pool_delegator_address must be an EVM contract when evm keeper is configured") +} diff --git a/x/poolrebalancer/keeper/rebalance.go b/x/poolrebalancer/keeper/rebalance.go new file mode 100644 index 00000000..a7ca18da --- /dev/null +++ b/x/poolrebalancer/keeper/rebalance.go @@ -0,0 +1,409 @@ +package keeper + +import ( + "context" + "fmt" + "sort" + "strconv" + + "github.com/cosmos/evm/x/poolrebalancer/types" + + "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetTargetBondedValidators returns the top bonded validators by power. +// The result size is capped by the module param MaxTargetValidators and preserves staking's power ordering. +// CommunityPool stake automation delegates through the staking precompile using the same bonded-validator +// query family, while rebalancing intentionally uses this top-power target set for drift correction. +func (k Keeper) GetTargetBondedValidators(ctx context.Context) ([]sdk.ValAddress, error) { + maxN, err := k.GetMaxTargetValidators(ctx) + if err != nil { + return nil, err + } + if maxN == 0 { + return nil, fmt.Errorf("MaxTargetValidators must be > 0") + } + + vals, err := k.stakingKeeper.GetBondedValidatorsByPower(ctx) + if err != nil { + return nil, err + } + + n := int(maxN) + if n > len(vals) { + n = len(vals) + } + + out := make([]sdk.ValAddress, 0, n) + for i := 0; i < n; i++ { + valAddr, err := sdk.ValAddressFromBech32(vals[i].OperatorAddress) + if err != nil { + return nil, err + } + out = append(out, valAddr) + } + return out, nil +} + +// GetDelegatorStakeByValidator returns the delegator's bonded stake per validator (in tokens, truncated). +// The returned map is keyed by validator operator address (bech32), plus the total across all validators. 
+func (k Keeper) GetDelegatorStakeByValidator(ctx context.Context, del sdk.AccAddress) (map[string]math.Int, math.Int, error) { + delegations, err := k.getAllDelegatorDelegations(ctx, del) + if err != nil { + return nil, math.ZeroInt(), err + } + + stakeByValidator := make(map[string]math.Int, len(delegations)) + total := math.ZeroInt() + + for _, d := range delegations { + valAddr, err := sdk.ValAddressFromBech32(d.ValidatorAddress) + if err != nil { + return nil, math.ZeroInt(), err + } + + val, err := k.stakingKeeper.GetValidator(ctx, valAddr) + if err != nil { + return nil, math.ZeroInt(), err + } + + // Convert shares -> tokens and truncate to integer tokens. + tokensDec := val.TokensFromSharesTruncated(d.Shares) + tokensInt := tokensDec.TruncateInt() + if tokensInt.IsZero() { + continue + } + + key := valAddr.String() + prev, ok := stakeByValidator[key] + if ok { + stakeByValidator[key] = prev.Add(tokensInt) + } else { + stakeByValidator[key] = tokensInt + } + total = total.Add(tokensInt) + } + + return stakeByValidator, total, nil +} + +// EqualWeightTarget computes an equal-weight target distribution across the given validator set. +// Any remainder from integer division is assigned deterministically to the first validators. +func (k Keeper) EqualWeightTarget(totalStake math.Int, targetValidators []sdk.ValAddress) (map[string]math.Int, error) { + n := len(targetValidators) + if n == 0 { + return nil, fmt.Errorf("target validators list is empty") + } + if totalStake.IsNegative() { + return nil, fmt.Errorf("total stake cannot be negative") + } + + nInt := math.NewInt(int64(n)) + base := totalStake.Quo(nInt) + remainderCount := totalStake.Mod(nInt).Int64() + + out := make(map[string]math.Int, n) + for i, val := range targetValidators { + amt := base + if int64(i) < remainderCount { + amt = amt.Add(math.OneInt()) + } + out[val.String()] = amt + } + return out, nil +} + +// ComputeDeltas returns target-current per validator and applies the rebalance threshold. 
+// Deltas within the threshold are treated as zero. +func (k Keeper) ComputeDeltas(target, current map[string]math.Int, totalStake math.Int, bp uint32) (map[string]math.Int, error) { + threshold := totalStake.Mul(math.NewInt(int64(bp))).Quo(math.NewInt(10_000)) + + allKeys := make(map[string]struct{}) + for key := range target { + allKeys[key] = struct{}{} + } + for key := range current { + allKeys[key] = struct{}{} + } + + deltas := make(map[string]math.Int, len(allKeys)) + for key := range allKeys { + t := target[key] + if t.IsNil() { + t = math.ZeroInt() + } + c := current[key] + if c.IsNil() { + c = math.ZeroInt() + } + delta := t.Sub(c) + if delta.Abs().LT(threshold) { + delta = math.ZeroInt() + } + deltas[key] = delta + } + return deltas, nil +} + +func minInt(a, b math.Int) math.Int { + if a.LT(b) { + return a + } + return b +} + +// filterTargetValidators excludes validators from same-block rebalance destinations. +// When a validator was slashed in the previous block, poolrebalancer avoids targeting it in the +// current block and recomputes equal-weight targets across the remaining candidates. 
+func filterTargetValidators(targetValidators []sdk.ValAddress, excluded map[string]struct{}) []sdk.ValAddress { + if len(excluded) == 0 { + return targetValidators + } + + out := make([]sdk.ValAddress, 0, len(targetValidators)) + for _, val := range targetValidators { + if _, skip := excluded[val.String()]; skip { + continue + } + out = append(out, val) + } + return out +} + +func (k Keeper) emitRedelegationFailureEvent(ctx context.Context, del sdk.AccAddress, srcVal, dstVal sdk.ValAddress, coin sdk.Coin, reason string) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeRedelegationFailed, + sdk.NewAttribute(types.AttributeKeyDelegator, del.String()), + sdk.NewAttribute(types.AttributeKeySrcValidator, srcVal.String()), + sdk.NewAttribute(types.AttributeKeyDstValidator, dstVal.String()), + sdk.NewAttribute(types.AttributeKeyAmount, coin.Amount.String()), + sdk.NewAttribute(types.AttributeKeyDenom, coin.Denom), + sdk.NewAttribute(types.AttributeKeyReason, reason), + ), + ) +} + +// PickBestRedelegation selects a single (src, dst, amount) move based on deltas. +// Ties are broken deterministically by (src,dst) ordering. If maxMove is non-zero, it caps the amount. +func (k Keeper) PickBestRedelegation( + deltas map[string]math.Int, + keys []string, + blocked map[string]map[string]struct{}, + maxMove math.Int, +) (src string, dst string, amt math.Int, ok bool) { + return k.pickBestRedelegationWithRestrictions(deltas, keys, blocked, maxMove, nil, nil) +} + +// pickBestRedelegationWithRestrictions optionally constrains source and destination validators. +// Slash-priority scheduling uses this to force moves away from previously slashed validators before +// falling back to the generic drift-based picker. 
func (k Keeper) pickBestRedelegationWithRestrictions(
	deltas map[string]math.Int,
	keys []string,
	blocked map[string]map[string]struct{},
	maxMove math.Int,
	allowedSrc map[string]struct{},
	excludedDst map[string]struct{},
) (src string, dst string, amt math.Int, ok bool) {
	// Best candidate found so far: the move amount, the destination's deficit
	// (used only to break amount ties), and the chosen (src, dst) pair.
	bestAmt := math.ZeroInt()
	bestDstNeed := math.ZeroInt()
	bestSrc := ""
	bestDst := ""

	for _, s := range keys {
		// Optional source allow-list; nil means "any source is eligible".
		if allowedSrc != nil {
			if _, ok := allowedSrc[s]; !ok {
				continue
			}
		}
		ds := deltas[s]
		// Only overweight validators (negative delta) can act as sources.
		if !ds.IsNegative() {
			continue
		}
		srcOver := ds.Abs()
		for _, d := range keys {
			// Optional destination exclusion set; nil means "any destination".
			if excludedDst != nil {
				if _, excluded := excludedDst[d]; excluded {
					continue
				}
			}
			dd := deltas[d]
			// Only underweight validators (positive delta) can receive stake.
			if !dd.IsPositive() {
				continue
			}
			// Skip (src, dst) routes the caller has already marked as blocked
			// (e.g. routes that failed earlier in the same pass).
			if m, exists := blocked[s]; exists {
				if _, isBlocked := m[d]; isBlocked {
					continue
				}
			}
			// Move as much as both sides allow; a non-zero maxMove caps the
			// amount (zero maxMove means "no cap").
			move := minInt(srcOver, dd)
			if !maxMove.IsZero() {
				move = minInt(move, maxMove)
			}
			if move.IsZero() {
				continue
			}
			// Prefer larger moves.
			// If move ties (common when capped), prefer destination with larger deficit.
			// Final tie-break stays deterministic on (src,dst).
			// NOTE(review): determinism relies on these explicit comparisons,
			// not on the iteration order of `keys` — sorted keys from the
			// caller only make the scan order reproducible.
			if move.GT(bestAmt) ||
				(move.Equal(bestAmt) && (dd.GT(bestDstNeed) ||
					(dd.Equal(bestDstNeed) && (s < bestSrc || (s == bestSrc && d < bestDst))))) {
				bestAmt = move
				bestDstNeed = dd
				bestSrc = s
				bestDst = d
			}
		}
	}

	// No eligible (src, dst, amount) triple was found.
	if bestAmt.IsZero() {
		return "", "", math.ZeroInt(), false
	}
	return bestSrc, bestDst, bestAmt, true
}

// ProcessRebalance compares current stake to target and applies up to MaxOpsPerBlock operations.
// It is intended to be called from EndBlock after pending queues are cleaned up.
+// +// Slash-aware behavior: +// - previous-block slashed validators are excluded from same-block destinations/targets +// - redelegation priority first tries to move stake away from those validators +// - if all target validators were slashed in the previous block, rebalance cleanly no-ops +func (k Keeper) ProcessRebalance(ctx context.Context) error { + // Fast-path exits: not configured, no targets, or nothing bonded. + del, err := k.GetPoolDelegatorAddress(ctx) + if err != nil { + return err + } + if del.Empty() { + return nil + } + slashedVals, err := k.getPreviousBlockSlashedValidatorsOrEmpty(ctx) + if err != nil { + return err + } + targetVals, err := k.GetTargetBondedValidators(ctx) + if err != nil { + return err + } + targetVals = filterTargetValidators(targetVals, slashedVals) + if len(targetVals) == 0 { + // Conservatively do nothing for this block when every same-block target was slashed + // in the previous block. + return nil + } + stakeByValidator, total, err := k.GetDelegatorStakeByValidator(ctx, del) + if err != nil { + return err + } + if total.IsZero() { + return nil + } + + // Load params once for this rebalance pass. + params, err := k.GetParams(ctx) + if err != nil { + return err + } + + // Compute equal-weight targets and deltas (threshold applied inside ComputeDeltas). + target, err := k.EqualWeightTarget(total, targetVals) + if err != nil { + return err + } + deltas, err := k.ComputeDeltas(target, stakeByValidator, total, params.RebalanceThresholdBp) + if err != nil { + return err + } + + // Nothing exceeds the threshold. + allZero := true + for _, d := range deltas { + if !d.IsZero() { + allZero = false + break + } + } + if allZero { + return nil + } + + // Apply params to the operation loop. + maxOps := params.MaxOpsPerBlock + bondDenom, err := k.stakingKeeper.BondDenom(ctx) + if err != nil { + return err + } + + // Apply operations using redelegations only. 
+ blocked := make(map[string]map[string]struct{}) + keys := make([]string, 0, len(deltas)) + for key := range deltas { + keys = append(keys, key) + } + sort.Strings(keys) + + maxMove := params.MaxMovePerOp + if maxMove.IsNil() { + maxMove = math.ZeroInt() + } + + var opsDone uint32 + for opsDone < maxOps { + srcKey, dstKey, amt, ok := "", "", math.ZeroInt(), false + if len(slashedVals) > 0 { + srcKey, dstKey, amt, ok = k.pickBestRedelegationWithRestrictions(deltas, keys, blocked, maxMove, slashedVals, slashedVals) + } + if !ok { + srcKey, dstKey, amt, ok = k.PickBestRedelegation(deltas, keys, blocked, maxMove) + } + + if ok { + srcVal, err := sdk.ValAddressFromBech32(srcKey) + if err != nil { + return err + } + dstVal, err := sdk.ValAddressFromBech32(dstKey) + if err != nil { + return err + } + coin := sdk.NewCoin(bondDenom, amt) + + if k.CanBeginRedelegation(ctx, del, srcVal, dstVal, coin) { + if _, err := k.BeginTrackedRedelegation(ctx, del, srcVal, dstVal, coin); err == nil { + deltas[srcKey] = deltas[srcKey].Add(amt) + deltas[dstKey] = deltas[dstKey].Sub(amt) + opsDone++ + continue + } else { + k.emitRedelegationFailureEvent(ctx, del, srcVal, dstVal, coin, err.Error()) + } + } + + if blocked[srcKey] == nil { + blocked[srcKey] = make(map[string]struct{}) + } + blocked[srcKey][dstKey] = struct{}{} + continue + } + break + } + + if opsDone > 0 { + sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeRebalanceSummary, + sdk.NewAttribute(types.AttributeKeyDelegator, del.String()), + sdk.NewAttribute(types.AttributeKeyOpsDone, strconv.FormatUint(uint64(opsDone), 10)), + ), + ) + } + + return nil +} diff --git a/x/poolrebalancer/keeper/rebalance_events_test.go b/x/poolrebalancer/keeper/rebalance_events_test.go new file mode 100644 index 00000000..8f630dd2 --- /dev/null +++ b/x/poolrebalancer/keeper/rebalance_events_test.go @@ -0,0 +1,42 @@ +package keeper + +import ( + "bytes" + "testing" + + 
"github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +func TestEmitRedelegationFailureEvent(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + src := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + dst := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + coin := sdk.NewInt64Coin("stake", 42) + reason := "begin redelegation failed" + + k.emitRedelegationFailureEvent(ctx, del, src, dst, coin, reason) + + events := sdk.UnwrapSDKContext(ctx).EventManager().Events() + require.NotEmpty(t, events) + + ev := events[len(events)-1] + require.Equal(t, types.EventTypeRedelegationFailed, ev.Type) + + attrs := map[string]string{} + for _, attr := range ev.Attributes { + attrs[string(attr.Key)] = string(attr.Value) + } + + require.Equal(t, del.String(), attrs[types.AttributeKeyDelegator]) + require.Equal(t, src.String(), attrs[types.AttributeKeySrcValidator]) + require.Equal(t, dst.String(), attrs[types.AttributeKeyDstValidator]) + require.Equal(t, coin.Amount.String(), attrs[types.AttributeKeyAmount]) + require.Equal(t, coin.Denom, attrs[types.AttributeKeyDenom]) + require.Equal(t, reason, attrs[types.AttributeKeyReason]) +} diff --git a/x/poolrebalancer/keeper/rebalance_process_test.go b/x/poolrebalancer/keeper/rebalance_process_test.go new file mode 100644 index 00000000..8520dcde --- /dev/null +++ b/x/poolrebalancer/keeper/rebalance_process_test.go @@ -0,0 +1,661 @@ +package keeper + +import ( + "bytes" + "context" + "errors" + "fmt" + "strconv" + "testing" + "time" + + storetypes "cosmossdk.io/store/types" + abci "github.com/cometbft/cometbft/abci/types" + "github.com/stretchr/testify/require" + + "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + moduletestutil 
"github.com/cosmos/cosmos-sdk/types/module/testutil" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +type recordedBeginRedelegation struct { + del sdk.AccAddress + srcVal, dstVal sdk.ValAddress + shares math.LegacyDec +} + +type mockStakingKeeper struct { + vals []stakingtypes.Validator + validatorByAddr map[string]stakingtypes.Validator + delegations []stakingtypes.Delegation + delegationByValAddr map[string]stakingtypes.Delegation + failBeginRedelegation bool + + beginRedelegationRecords []recordedBeginRedelegation +} + +func (m *mockStakingKeeper) GetBondedValidatorsByPower(ctx context.Context) ([]stakingtypes.Validator, error) { + return m.vals, nil +} + +func (m *mockStakingKeeper) GetDelegatorDelegations(ctx context.Context, delegator sdk.AccAddress, maxRetrieve uint16) ([]stakingtypes.Delegation, error) { + return m.delegations, nil +} + +func (m *mockStakingKeeper) DelegatorDelegations(ctx context.Context, req *stakingtypes.QueryDelegatorDelegationsRequest) (*stakingtypes.QueryDelegatorDelegationsResponse, error) { + start := 0 + if req != nil && req.Pagination != nil && len(req.Pagination.Key) > 0 { + parsed, err := strconv.Atoi(string(req.Pagination.Key)) + if err != nil { + return nil, fmt.Errorf("invalid pagination key: %w", err) + } + start = parsed + } + if start > len(m.delegations) { + start = len(m.delegations) + } + limit := len(m.delegations) + if req != nil && req.Pagination != nil && req.Pagination.Limit > 0 && int(req.Pagination.Limit) < limit { + limit = int(req.Pagination.Limit) + } + end := start + limit + if end > len(m.delegations) { + end = len(m.delegations) + } + responses := make([]stakingtypes.DelegationResponse, 0, end-start) + for _, delegation := range m.delegations[start:end] { + responses = append(responses, stakingtypes.DelegationResponse{Delegation: delegation}) + } + var nextKey []byte + if end < len(m.delegations) { + nextKey = []byte(strconv.Itoa(end)) + 
} + return &stakingtypes.QueryDelegatorDelegationsResponse{ + DelegationResponses: responses, + Pagination: &query.PageResponse{NextKey: nextKey}, + }, nil +} + +func (m *mockStakingKeeper) GetValidator(ctx context.Context, addr sdk.ValAddress) (stakingtypes.Validator, error) { + val, ok := m.validatorByAddr[addr.String()] + if !ok { + return stakingtypes.Validator{}, errors.New("validator not found") + } + return val, nil +} + +func (m *mockStakingKeeper) GetDelegation(ctx context.Context, delegatorAddr sdk.AccAddress, valAddr sdk.ValAddress) (stakingtypes.Delegation, error) { + delegation, ok := m.delegationByValAddr[valAddr.String()] + if !ok { + return stakingtypes.Delegation{}, errors.New("delegation not found") + } + return delegation, nil +} + +func (m *mockStakingKeeper) BeginRedelegation(ctx context.Context, delAddr sdk.AccAddress, valSrcAddr, valDstAddr sdk.ValAddress, sharesAmount math.LegacyDec) (completionTime time.Time, err error) { + m.beginRedelegationRecords = append(m.beginRedelegationRecords, recordedBeginRedelegation{ + del: delAddr, srcVal: valSrcAddr, dstVal: valDstAddr, shares: sharesAmount, + }) + if m.failBeginRedelegation { + return time.Time{}, errors.New("mock begin redelegation failed") + } + return sdk.UnwrapSDKContext(ctx).BlockTime().Add(time.Hour), nil +} + +func (m *mockStakingKeeper) UnbondingTime(ctx context.Context) (time.Duration, error) { + return time.Hour, nil +} + +func (m *mockStakingKeeper) BondDenom(ctx context.Context) (string, error) { + return "stake", nil +} + +func newProcessRebalanceKeeper(t *testing.T, sk *mockStakingKeeper) (sdk.Context, Keeper) { + t.Helper() + + storeKey := storetypes.NewKVStoreKey(types.ModuleName) + tKey := storetypes.NewTransientStoreKey("transient_test") + ctx := testutil.DefaultContext(storeKey, tKey) + ctx = ctx.WithBlockTime(time.Now().UTC()) + + storeService := runtime.NewKVStoreService(storeKey) + cdc := moduletestutil.MakeTestEncodingConfig().Codec + authority := 
sdk.AccAddress(bytes.Repeat([]byte{9}, 20)) + k := NewKeeper(cdc, storeService, tKey, sk, sk, nil, authority, &mockEVMKeeper{}, nil) + + return ctx, k +} + +func setupBasicRebalanceState(t *testing.T, ctx sdk.Context, k Keeper) (sdk.AccAddress, sdk.ValAddress, sdk.ValAddress) { + t.Helper() + + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + srcVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + + params := types.DefaultParams() + params.PoolDelegatorAddress = del.String() + params.MaxTargetValidators = 2 + params.RebalanceThresholdBp = 0 + params.MaxOpsPerBlock = 1 + params.MaxMovePerOp = math.ZeroInt() + require.NoError(t, k.SetParams(ctx, params)) + + return del, srcVal, dstVal +} + +func attrsToMap(attrs []abci.EventAttribute) map[string]string { + out := make(map[string]string, len(attrs)) + for _, attr := range attrs { + out[attr.Key] = attr.Value + } + return out +} + +func TestGetTargetBondedValidators_UsesPowerOrderAndMaxTargetCap(t *testing.T) { + valLowAddr := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + valHighAddr := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + valMidAddr := sdk.ValAddress(bytes.Repeat([]byte{4}, 20)) + + valLow := stakingtypes.Validator{OperatorAddress: valLowAddr.String()} + valHigh := stakingtypes.Validator{OperatorAddress: valHighAddr.String()} + valMid := stakingtypes.Validator{OperatorAddress: valMidAddr.String()} + + sk := &mockStakingKeeper{ + // Mock the staking keeper's bonded-by-power query order directly. The rebalancer policy is to + // preserve this order and cap it, not to sort by operator address or mirror contract stake remainder order. 
+ vals: []stakingtypes.Validator{valHigh, valMid, valLow}, + } + ctx, k := newProcessRebalanceKeeper(t, sk) + + params := types.DefaultParams() + params.PoolDelegatorAddress = sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String() + params.MaxTargetValidators = 2 + require.NoError(t, k.SetParams(ctx, params)) + + targets, err := k.GetTargetBondedValidators(ctx) + require.NoError(t, err) + require.Equal(t, []sdk.ValAddress{valHighAddr, valMidAddr}, targets) +} + +func TestProcessRebalance_EmitsRedelegationFailedEvent(t *testing.T) { + srcVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + + srcValidator := stakingtypes.Validator{ + OperatorAddress: srcVal.String(), + Tokens: math.NewInt(100), + DelegatorShares: math.LegacyNewDec(100), + } + dstValidator := stakingtypes.Validator{ + OperatorAddress: dstVal.String(), + Tokens: math.NewInt(100), + DelegatorShares: math.LegacyNewDec(100), + } + + sk := &mockStakingKeeper{ + vals: []stakingtypes.Validator{srcValidator, dstValidator}, + validatorByAddr: map[string]stakingtypes.Validator{ + srcVal.String(): srcValidator, + dstVal.String(): dstValidator, + }, + delegations: []stakingtypes.Delegation{ + { + DelegatorAddress: sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String(), + ValidatorAddress: srcVal.String(), + Shares: math.LegacyNewDec(100), + }, + }, + delegationByValAddr: map[string]stakingtypes.Delegation{ + srcVal.String(): { + DelegatorAddress: sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String(), + ValidatorAddress: srcVal.String(), + Shares: math.LegacyNewDec(100), + }, + }, + failBeginRedelegation: true, + } + + ctx, k := newProcessRebalanceKeeper(t, sk) + del, _, _ := setupBasicRebalanceState(t, ctx, k) + + require.NoError(t, k.ProcessRebalance(ctx)) + + events := sdk.UnwrapSDKContext(ctx).EventManager().Events() + found := false + for _, ev := range events { + if ev.Type != types.EventTypeRedelegationFailed { + continue + } + found = true + attrs := 
attrsToMap(ev.Attributes) + require.Equal(t, del.String(), attrs[types.AttributeKeyDelegator]) + require.Equal(t, srcVal.String(), attrs[types.AttributeKeySrcValidator]) + require.Equal(t, dstVal.String(), attrs[types.AttributeKeyDstValidator]) + require.Equal(t, "50", attrs[types.AttributeKeyAmount]) + require.Equal(t, "stake", attrs[types.AttributeKeyDenom]) + require.Contains(t, attrs[types.AttributeKeyReason], "mock begin redelegation failed") + } + require.True(t, found, "expected redelegation failure event") +} + +func TestProcessRebalance_EmitsOnlyRedelegationFailureWhenRedelegationFails(t *testing.T) { + srcVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + + srcValidator := stakingtypes.Validator{ + OperatorAddress: srcVal.String(), + Tokens: math.NewInt(100), + DelegatorShares: math.LegacyNewDec(100), + } + dstValidator := stakingtypes.Validator{ + OperatorAddress: dstVal.String(), + Tokens: math.NewInt(100), + DelegatorShares: math.LegacyNewDec(100), + } + + sk := &mockStakingKeeper{ + vals: []stakingtypes.Validator{srcValidator, dstValidator}, + validatorByAddr: map[string]stakingtypes.Validator{ + srcVal.String(): srcValidator, + dstVal.String(): dstValidator, + }, + delegations: []stakingtypes.Delegation{ + { + DelegatorAddress: sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String(), + ValidatorAddress: srcVal.String(), + Shares: math.LegacyNewDec(100), + }, + }, + delegationByValAddr: map[string]stakingtypes.Delegation{ + srcVal.String(): { + DelegatorAddress: sdk.AccAddress(bytes.Repeat([]byte{1}, 20)).String(), + ValidatorAddress: srcVal.String(), + Shares: math.LegacyNewDec(100), + }, + }, + failBeginRedelegation: true, + } + + ctx, k := newProcessRebalanceKeeper(t, sk) + del, _, _ := setupBasicRebalanceState(t, ctx, k) + + require.NoError(t, k.ProcessRebalance(ctx)) + + events := sdk.UnwrapSDKContext(ctx).EventManager().Events() + found := false + for _, ev := range events { + if ev.Type == 
types.EventTypeRedelegationFailed { + found = true + } + } + require.True(t, found, "expected redelegation failure event when begin redelegation fails") + _ = del + _ = srcVal +} + +// TestProcessRebalance_StopsWhenRedelegationBlocked ensures the loop stops when no +// eligible redelegation can be started. +func TestProcessRebalance_StopsWhenRedelegationBlocked(t *testing.T) { + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + valA := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + valB := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + valC := sdk.ValAddress(bytes.Repeat([]byte{4}, 20)) + + mkVal := func(addr sdk.ValAddress) stakingtypes.Validator { + return stakingtypes.Validator{ + OperatorAddress: addr.String(), + Tokens: math.NewInt(100), + DelegatorShares: math.LegacyNewDec(100), + } + } + valASt := mkVal(valA) + valBSt := mkVal(valB) + valCSt := mkVal(valC) + + sk := &mockStakingKeeper{ + vals: []stakingtypes.Validator{valASt, valBSt, valCSt}, + validatorByAddr: map[string]stakingtypes.Validator{ + valA.String(): valASt, + valB.String(): valBSt, + valC.String(): valCSt, + }, + delegations: []stakingtypes.Delegation{ + {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(90)}, + {DelegatorAddress: del.String(), ValidatorAddress: valB.String(), Shares: math.LegacyNewDec(70)}, + }, + delegationByValAddr: map[string]stakingtypes.Delegation{ + valA.String(): {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(90)}, + valB.String(): {DelegatorAddress: del.String(), ValidatorAddress: valB.String(), Shares: math.LegacyNewDec(70)}, + }, + failBeginRedelegation: true, + } + + ctx, k := newProcessRebalanceKeeper(t, sk) + + params := types.DefaultParams() + params.PoolDelegatorAddress = del.String() + params.MaxTargetValidators = 3 + params.RebalanceThresholdBp = 0 + params.MaxOpsPerBlock = 5 + params.MaxMovePerOp = math.ZeroInt() + require.NoError(t, k.SetParams(ctx, params)) + + 
require.NoError(t, k.ProcessRebalance(ctx)) + + var successOps int + for _, ev := range sdk.UnwrapSDKContext(ctx).EventManager().Events() { + switch ev.Type { + case types.EventTypeRebalanceSummary: + attrs := attrsToMap(ev.Attributes) + require.Equal(t, del.String(), attrs[types.AttributeKeyDelegator]) + successOps, _ = strconv.Atoi(attrs[types.AttributeKeyOpsDone]) + } + } + require.Equal(t, 0, successOps, "expected no successful ops when redelegations are blocked/failing") +} + +func TestProcessRebalance_RedelegationFailureDoesNotScheduleOps(t *testing.T) { + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + valA := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + valB := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + valC := sdk.ValAddress(bytes.Repeat([]byte{4}, 20)) + + mkVal := func(addr sdk.ValAddress) stakingtypes.Validator { + return stakingtypes.Validator{ + OperatorAddress: addr.String(), + Tokens: math.NewInt(100), + DelegatorShares: math.LegacyNewDec(100), + } + } + valASt := mkVal(valA) + valBSt := mkVal(valB) + valCSt := mkVal(valC) + + sk := &mockStakingKeeper{ + vals: []stakingtypes.Validator{valASt, valBSt, valCSt}, + validatorByAddr: map[string]stakingtypes.Validator{ + valA.String(): valASt, + valB.String(): valBSt, + valC.String(): valCSt, + }, + delegations: []stakingtypes.Delegation{ + {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(90)}, + {DelegatorAddress: del.String(), ValidatorAddress: valB.String(), Shares: math.LegacyNewDec(70)}, + }, + delegationByValAddr: map[string]stakingtypes.Delegation{ + valA.String(): {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(90)}, + valB.String(): {DelegatorAddress: del.String(), ValidatorAddress: valB.String(), Shares: math.LegacyNewDec(70)}, + }, + failBeginRedelegation: true, + } + + ctx, k := newProcessRebalanceKeeper(t, sk) + + params := types.DefaultParams() + params.PoolDelegatorAddress = del.String() + 
params.MaxTargetValidators = 3 + params.RebalanceThresholdBp = 0 + params.MaxOpsPerBlock = 5 + params.MaxMovePerOp = math.ZeroInt() + require.NoError(t, k.SetParams(ctx, params)) + + require.NoError(t, k.ProcessRebalance(ctx)) + + events := sdk.UnwrapSDKContext(ctx).EventManager().Events() + var successOps int + var sawRedelegationFailure bool + for _, ev := range events { + switch ev.Type { + case types.EventTypeRedelegationFailed: + sawRedelegationFailure = true + case types.EventTypeRebalanceSummary: + attrs := attrsToMap(ev.Attributes) + successOps, _ = strconv.Atoi(attrs[types.AttributeKeyOpsDone]) + } + } + + require.True(t, sawRedelegationFailure, "expected redelegation failure event") + require.Equal(t, 0, successOps, "expected no successful ops when redelegation cannot begin") + _ = valB +} + +// TestProcessRebalance_HappyPath_SingleRedelegation asserts one successful scheduling op maps to exactly +// one staking BeginRedelegation with the shares implied by token amount (equal-weight target, max_ops=1). 
+func TestProcessRebalance_HappyPath_SingleRedelegation(t *testing.T) { + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + srcVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + + srcValidator := stakingtypes.Validator{ + OperatorAddress: srcVal.String(), + Tokens: math.NewInt(100), + DelegatorShares: math.LegacyNewDec(100), + } + dstValidator := stakingtypes.Validator{ + OperatorAddress: dstVal.String(), + Tokens: math.NewInt(100), + DelegatorShares: math.LegacyNewDec(100), + } + + sk := &mockStakingKeeper{ + vals: []stakingtypes.Validator{srcValidator, dstValidator}, + validatorByAddr: map[string]stakingtypes.Validator{ + srcVal.String(): srcValidator, + dstVal.String(): dstValidator, + }, + delegations: []stakingtypes.Delegation{ + { + DelegatorAddress: del.String(), + ValidatorAddress: srcVal.String(), + Shares: math.LegacyNewDec(100), + }, + }, + delegationByValAddr: map[string]stakingtypes.Delegation{ + srcVal.String(): { + DelegatorAddress: del.String(), + ValidatorAddress: srcVal.String(), + Shares: math.LegacyNewDec(100), + }, + }, + } + + ctx, k := newProcessRebalanceKeeper(t, sk) + gotDel, gotSrc, gotDst := setupBasicRebalanceState(t, ctx, k) + require.Equal(t, del.String(), gotDel.String()) + require.Equal(t, srcVal.String(), gotSrc.String()) + require.Equal(t, dstVal.String(), gotDst.String()) + + require.NoError(t, k.ProcessRebalance(ctx)) + + require.Len(t, sk.beginRedelegationRecords, 1, "expected exactly one BeginRedelegation") + + rec := sk.beginRedelegationRecords[0] + require.Equal(t, del.String(), rec.del.String()) + require.Equal(t, srcVal.String(), rec.srcVal.String()) + require.Equal(t, dstVal.String(), rec.dstVal.String()) + // 100 stake total, 2 validators → target 50 each; move 50 tokens from src → dst; 1:1 token/share. 
+ require.True(t, rec.shares.Equal(math.LegacyNewDec(50)), "shares=%s", rec.shares.String()) + + events := sdk.UnwrapSDKContext(ctx).EventManager().Events() + var sawStart, sawSummary bool + for _, ev := range events { + switch ev.Type { + case types.EventTypeRedelegationStarted: + sawStart = true + attrs := attrsToMap(ev.Attributes) + require.Equal(t, "50", attrs[types.AttributeKeyAmount]) + require.Equal(t, "stake", attrs[types.AttributeKeyDenom]) + case types.EventTypeRebalanceSummary: + sawSummary = true + attrs := attrsToMap(ev.Attributes) + require.Equal(t, "1", attrs[types.AttributeKeyOpsDone]) + } + } + require.True(t, sawStart, "expected redelegation started event") + require.True(t, sawSummary, "expected rebalance summary event") +} + +func TestProcessRebalance_PrioritizesSlashedValidatorSource(t *testing.T) { + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + valA := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + valB := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + valC := sdk.ValAddress(bytes.Repeat([]byte{4}, 20)) + + mkVal := func(addr sdk.ValAddress, tokens int64) stakingtypes.Validator { + return stakingtypes.Validator{ + OperatorAddress: addr.String(), + Tokens: math.NewInt(tokens), + DelegatorShares: math.LegacyNewDec(tokens), + } + } + + sk := &mockStakingKeeper{ + vals: []stakingtypes.Validator{mkVal(valA, 100), mkVal(valB, 100), mkVal(valC, 100)}, + validatorByAddr: map[string]stakingtypes.Validator{ + valA.String(): mkVal(valA, 100), + valB.String(): mkVal(valB, 100), + valC.String(): mkVal(valC, 100), + }, + delegations: []stakingtypes.Delegation{ + {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(50)}, + {DelegatorAddress: del.String(), ValidatorAddress: valB.String(), Shares: math.LegacyNewDec(70)}, + }, + delegationByValAddr: map[string]stakingtypes.Delegation{ + valA.String(): {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(50)}, + valB.String(): 
{DelegatorAddress: del.String(), ValidatorAddress: valB.String(), Shares: math.LegacyNewDec(70)}, + }, + } + + ctx, k := newProcessRebalanceKeeper(t, sk) + params := types.DefaultParams() + params.PoolDelegatorAddress = del.String() + params.MaxTargetValidators = 3 + params.RebalanceThresholdBp = 0 + params.MaxOpsPerBlock = 1 + params.MaxMovePerOp = math.ZeroInt() + require.NoError(t, k.SetParams(ctx, params)) + require.NoError(t, k.setPreviousBlockSlashedValidators(ctx, map[string]struct{}{valA.String(): {}})) + + require.NoError(t, k.ProcessRebalance(ctx)) + + require.Len(t, sk.beginRedelegationRecords, 1) + rec := sk.beginRedelegationRecords[0] + require.Equal(t, valA.String(), rec.srcVal.String(), "slash-priority should move away from slashed validator first") + require.Equal(t, valC.String(), rec.dstVal.String()) + require.True(t, rec.shares.Equal(math.LegacyNewDec(50)), "expected full move away from slashed validator after target exclusion") +} + +func TestProcessRebalance_ExcludesSlashedValidatorFromDestinations(t *testing.T) { + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + valA := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + valB := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + valC := sdk.ValAddress(bytes.Repeat([]byte{4}, 20)) + + mkVal := func(addr sdk.ValAddress, tokens int64) stakingtypes.Validator { + return stakingtypes.Validator{ + OperatorAddress: addr.String(), + Tokens: math.NewInt(tokens), + DelegatorShares: math.LegacyNewDec(tokens), + } + } + + sk := &mockStakingKeeper{ + vals: []stakingtypes.Validator{mkVal(valA, 100), mkVal(valB, 100), mkVal(valC, 100)}, + validatorByAddr: map[string]stakingtypes.Validator{ + valA.String(): mkVal(valA, 100), + valB.String(): mkVal(valB, 100), + valC.String(): mkVal(valC, 100), + }, + delegations: []stakingtypes.Delegation{ + {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(100)}, + }, + delegationByValAddr: map[string]stakingtypes.Delegation{ + 
valA.String(): {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(100)}, + }, + } + + ctx, k := newProcessRebalanceKeeper(t, sk) + params := types.DefaultParams() + params.PoolDelegatorAddress = del.String() + params.MaxTargetValidators = 3 + params.RebalanceThresholdBp = 0 + params.MaxOpsPerBlock = 1 + params.MaxMovePerOp = math.ZeroInt() + require.NoError(t, k.SetParams(ctx, params)) + require.NoError(t, k.setPreviousBlockSlashedValidators(ctx, map[string]struct{}{valB.String(): {}})) + + require.NoError(t, k.ProcessRebalance(ctx)) + + require.Len(t, sk.beginRedelegationRecords, 1) + rec := sk.beginRedelegationRecords[0] + require.Equal(t, valA.String(), rec.srcVal.String()) + require.Equal(t, valC.String(), rec.dstVal.String(), "slashed validator must not be chosen as same-block destination") + require.True(t, rec.shares.Equal(math.LegacyNewDec(50)), "expected recomputed unslashed target split") +} + +func TestProcessRebalance_SlashedPriorityStillNoSuccessfulOpsWhenRedelegationFails(t *testing.T) { + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + valA := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + valB := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + valC := sdk.ValAddress(bytes.Repeat([]byte{4}, 20)) + + mkVal := func(addr sdk.ValAddress, tokens int64) stakingtypes.Validator { + return stakingtypes.Validator{ + OperatorAddress: addr.String(), + Tokens: math.NewInt(tokens), + DelegatorShares: math.LegacyNewDec(tokens), + } + } + + sk := &mockStakingKeeper{ + vals: []stakingtypes.Validator{mkVal(valA, 100), mkVal(valB, 100), mkVal(valC, 100)}, + validatorByAddr: map[string]stakingtypes.Validator{ + valA.String(): mkVal(valA, 100), + valB.String(): mkVal(valB, 100), + valC.String(): mkVal(valC, 100), + }, + delegations: []stakingtypes.Delegation{ + {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(50)}, + {DelegatorAddress: del.String(), ValidatorAddress: valB.String(), 
Shares: math.LegacyNewDec(70)}, + }, + delegationByValAddr: map[string]stakingtypes.Delegation{ + valA.String(): {DelegatorAddress: del.String(), ValidatorAddress: valA.String(), Shares: math.LegacyNewDec(50)}, + valB.String(): {DelegatorAddress: del.String(), ValidatorAddress: valB.String(), Shares: math.LegacyNewDec(70)}, + }, + failBeginRedelegation: true, + } + + ctx, k := newProcessRebalanceKeeper(t, sk) + params := types.DefaultParams() + params.PoolDelegatorAddress = del.String() + params.MaxTargetValidators = 3 + params.RebalanceThresholdBp = 0 + params.MaxOpsPerBlock = 1 + params.MaxMovePerOp = math.ZeroInt() + require.NoError(t, k.SetParams(ctx, params)) + require.NoError(t, k.setPreviousBlockSlashedValidators(ctx, map[string]struct{}{valA.String(): {}})) + + require.NoError(t, k.ProcessRebalance(ctx)) + + events := sdk.UnwrapSDKContext(ctx).EventManager().Events() + var sawFailure bool + var sawSummary bool + for _, ev := range events { + switch ev.Type { + case types.EventTypeRedelegationFailed: + sawFailure = true + case types.EventTypeRebalanceSummary: + sawSummary = true + } + } + require.True(t, sawFailure, "expected redelegation failure event") + require.False(t, sawSummary, "expected no successful operations when redelegation starts fail") + _ = valA +} diff --git a/x/poolrebalancer/keeper/rebalance_test.go b/x/poolrebalancer/keeper/rebalance_test.go new file mode 100644 index 00000000..b5e8952e --- /dev/null +++ b/x/poolrebalancer/keeper/rebalance_test.go @@ -0,0 +1,392 @@ +package keeper_test + +import ( + "bytes" + "sort" + "strconv" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/evm/x/poolrebalancer/keeper" + "github.com/cosmos/evm/x/poolrebalancer/types" + + "cosmossdk.io/math" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + 
stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" +) + +// testKeeperWithParams creates a keeper backed by an in-memory store and seeds its params. +// The staking keeper is a zero value and must not be used by these unit tests. +func testKeeperWithParams(t *testing.T, rebalanceThresholdBP, maxMovePerOp string) (sdk.Context, keeper.Keeper) { + t.Helper() + + storeKey := storetypes.NewKVStoreKey(types.ModuleName) + tKey := storetypes.NewTransientStoreKey("transient_test") + ctx := testutil.DefaultContext(storeKey, tKey) + + storeService := runtime.NewKVStoreService(storeKey) + cdc := moduletestutil.MakeTestEncodingConfig().Codec + stakingKeeper := &stakingkeeper.Keeper{} // zero value; do not call staking methods + k := keeper.NewKeeper(cdc, storeService, tKey, stakingKeeper, nil, nil, sdk.AccAddress(bytes.Repeat([]byte{9}, 20)), nil, nil) + + bp, err := strconv.ParseUint(rebalanceThresholdBP, 10, 32) + require.NoError(t, err) + + params := types.DefaultParams() + params.RebalanceThresholdBp = uint32(bp) + amt, ok := math.NewIntFromString(maxMovePerOp) + require.True(t, ok, "invalid maxMovePerOp %q", maxMovePerOp) + params.MaxMovePerOp = amt + require.NoError(t, k.SetParams(ctx, params)) + + return ctx, k +} + +// threeValAddrs returns three deterministic validator addresses (for EqualWeightTarget tests). +func threeValAddrs() []sdk.ValAddress { + return []sdk.ValAddress{ + sdk.ValAddress(bytes.Repeat([]byte{1}, 20)), + sdk.ValAddress(bytes.Repeat([]byte{2}, 20)), + sdk.ValAddress(bytes.Repeat([]byte{3}, 20)), + } +} + +// --------------------------------------------------------------------------- +// 3.1 EqualWeightTarget +// --------------------------------------------------------------------------- + +// TestEqualWeightTarget_HappyPath: totalStake=1000, n=3; expect 334, 333, 333 (remainder 1 to first). 
+func TestEqualWeightTarget_HappyPath(t *testing.T) { + k := keeper.Keeper{} // zero value; method does not use store or staking + totalStake := math.NewInt(1000) + vals := threeValAddrs() + require.Len(t, vals, 3) + + out, err := k.EqualWeightTarget(totalStake, vals) + require.NoError(t, err) + require.Len(t, out, 3) + + // 1000 / 3 = 333, remainder 1 → first validator gets 334, others 333 + sum := math.ZeroInt() + for _, v := range vals { + key := v.String() + amt, ok := out[key] + require.True(t, ok, "missing key %s", key) + sum = sum.Add(amt) + } + require.True(t, sum.Equal(totalStake), "sum %s != totalStake %s", sum, totalStake) + + require.True(t, out[vals[0].String()].Equal(math.NewInt(334)), "first validator should get 334") + require.True(t, out[vals[1].String()].Equal(math.NewInt(333))) + require.True(t, out[vals[2].String()].Equal(math.NewInt(333))) +} + +// TestEqualWeightTarget_RemainderZero: totalStake=999, n=3; expect exactly 333 each. +func TestEqualWeightTarget_RemainderZero(t *testing.T) { + k := keeper.Keeper{} + totalStake := math.NewInt(999) + vals := threeValAddrs() + + out, err := k.EqualWeightTarget(totalStake, vals) + require.NoError(t, err) + require.Len(t, out, 3) + + for _, v := range vals { + require.True(t, out[v.String()].Equal(math.NewInt(333)), "validator %s should get 333", v.String()) + } + sum := math.ZeroInt() + for _, amt := range out { + sum = sum.Add(amt) + } + require.True(t, sum.Equal(totalStake)) +} + +// TestEqualWeightTarget_SingleValidator: n=1; that validator gets full totalStake. +func TestEqualWeightTarget_SingleValidator(t *testing.T) { + k := keeper.Keeper{} + totalStake := math.NewInt(500) + vals := []sdk.ValAddress{sdk.ValAddress(bytes.Repeat([]byte{1}, 20))} + + out, err := k.EqualWeightTarget(totalStake, vals) + require.NoError(t, err) + require.Len(t, out, 1) + require.True(t, out[vals[0].String()].Equal(totalStake)) +} + +// TestEqualWeightTarget_Errors: n=0 or totalStake negative returns error. 
+func TestEqualWeightTarget_Errors(t *testing.T) { + k := keeper.Keeper{} + vals := threeValAddrs() + + _, err := k.EqualWeightTarget(math.NewInt(1000), nil) + require.Error(t, err) + require.Contains(t, err.Error(), "empty") + + _, err = k.EqualWeightTarget(math.NewInt(1000), []sdk.ValAddress{}) + require.Error(t, err) + require.Contains(t, err.Error(), "empty") + + _, err = k.EqualWeightTarget(math.NewInt(-1), vals) + require.Error(t, err) + require.Contains(t, err.Error(), "negative") +} + +// TestTestKeeperWithParams verifies that testKeeperWithParams sets params and they can be read back. +func TestTestKeeperWithParams(t *testing.T) { + ctx, k := testKeeperWithParams(t, "50", "100") + params, err := k.GetParams(ctx) + require.NoError(t, err) + require.Equal(t, uint32(50), params.RebalanceThresholdBp) + require.True(t, params.MaxMovePerOp.Equal(math.NewInt(100))) +} + +// --------------------------------------------------------------------------- +// 3.2 PickBestRedelegation +// --------------------------------------------------------------------------- + +// helper to build sorted keys from deltas +func sortedKeys(deltas map[string]math.Int) []string { + keys := make([]string, 0, len(deltas)) + for k := range deltas { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// TestPickBestRedelegation_SinglePair verifies a basic src/dst selection without caps. +func TestPickBestRedelegation_SinglePair(t *testing.T) { + k := keeper.Keeper{} + deltas := map[string]math.Int{ + "src": math.NewInt(-100), + "dst": math.NewInt(50), + } + keys := sortedKeys(deltas) + blocked := make(map[string]map[string]struct{}) + maxMove := math.ZeroInt() // no cap + + src, dst, amt, ok := k.PickBestRedelegation(deltas, keys, blocked, maxMove) + require.True(t, ok) + require.Equal(t, "src", src) + require.Equal(t, "dst", dst) + require.True(t, amt.Equal(math.NewInt(50))) +} + +// TestPickBestRedelegation_MaxMoveCap ensures MaxMovePerOp cap is applied. 
+func TestPickBestRedelegation_MaxMoveCap(t *testing.T) { + k := keeper.Keeper{} + deltas := map[string]math.Int{ + "src": math.NewInt(-100), + "dst": math.NewInt(50), + } + keys := sortedKeys(deltas) + blocked := make(map[string]map[string]struct{}) + maxMove := math.NewInt(10) + + _, _, amt, ok := k.PickBestRedelegation(deltas, keys, blocked, maxMove) + require.True(t, ok) + require.True(t, amt.Equal(math.NewInt(10))) +} + +// TestPickBestRedelegation_MaxMoveZeroMeansNoCap verifies maxMove=0 does not cap moves. +func TestPickBestRedelegation_MaxMoveZeroMeansNoCap(t *testing.T) { + k := keeper.Keeper{} + deltas := map[string]math.Int{ + "src": math.NewInt(-30), + "dst": math.NewInt(100), + } + keys := sortedKeys(deltas) + blocked := make(map[string]map[string]struct{}) + maxMove := math.ZeroInt() + + _, _, amt, ok := k.PickBestRedelegation(deltas, keys, blocked, maxMove) + require.True(t, ok) + // min(|-30|, 100) = 30 since there is no cap + require.True(t, amt.Equal(math.NewInt(30))) +} + +// TestPickBestRedelegation_BlockedPair skips blocked src/dst pairs. +func TestPickBestRedelegation_BlockedPair(t *testing.T) { + k := keeper.Keeper{} + deltas := map[string]math.Int{ + "src": math.NewInt(-40), + "dst": math.NewInt(40), + } + keys := sortedKeys(deltas) + + // Block the only possible pair. + blocked := map[string]map[string]struct{}{ + "src": {"dst": {}}, + } + maxMove := math.ZeroInt() + + _, _, _, ok := k.PickBestRedelegation(deltas, keys, blocked, maxMove) + require.False(t, ok) +} + +// TestPickBestRedelegation_TieBreak ensures lexicographic tie-break on (src,dst). 
+func TestPickBestRedelegation_TieBreak(t *testing.T) { + k := keeper.Keeper{} + deltas := map[string]math.Int{ + "a": math.NewInt(-10), + "b": math.NewInt(-10), + "c": math.NewInt(10), + "d": math.NewInt(10), + } + keys := sortedKeys(deltas) + blocked := make(map[string]map[string]struct{}) + maxMove := math.ZeroInt() + + src, dst, amt, ok := k.PickBestRedelegation(deltas, keys, blocked, maxMove) + require.True(t, ok) + // All valid moves have amount 10; lexicographically smallest (src,dst) is ("a","c"). + require.Equal(t, "a", src) + require.Equal(t, "c", dst) + require.True(t, amt.Equal(math.NewInt(10))) +} + +// TestPickBestRedelegation_CappedTiePrefersLargerDstDeficit verifies that when capped move +// amounts tie, destination with larger deficit is selected. +func TestPickBestRedelegation_CappedTiePrefersLargerDstDeficit(t *testing.T) { + k := keeper.Keeper{} + deltas := map[string]math.Int{ + "src": math.NewInt(-100), + "dstA": math.NewInt(1000), + "dstB": math.NewInt(500), + } + keys := sortedKeys(deltas) + blocked := make(map[string]map[string]struct{}) + maxMove := math.NewInt(10) // both candidates cap to move=10 + + src, dst, amt, ok := k.PickBestRedelegation(deltas, keys, blocked, maxMove) + require.True(t, ok) + require.Equal(t, "src", src) + require.Equal(t, "dstA", dst, "larger deficit destination should win tie under cap") + require.True(t, amt.Equal(math.NewInt(10))) +} + +// TestPickBestRedelegation_NoSourceOrDest tests cases where no move is possible. +func TestPickBestRedelegation_NoSourceOrDest(t *testing.T) { + k := keeper.Keeper{} + + // All zero deltas. + deltasAllZero := map[string]math.Int{ + "a": math.ZeroInt(), + "b": math.ZeroInt(), + } + keys := sortedKeys(deltasAllZero) + blocked := make(map[string]map[string]struct{}) + + _, _, _, ok := k.PickBestRedelegation(deltasAllZero, keys, blocked, math.ZeroInt()) + require.False(t, ok) + + // All positive deltas (no overweight src). 
+ deltasAllPositive := map[string]math.Int{ + "a": math.NewInt(10), + "b": math.NewInt(5), + } + keys = sortedKeys(deltasAllPositive) + _, _, _, ok = k.PickBestRedelegation(deltasAllPositive, keys, blocked, math.ZeroInt()) + require.False(t, ok) + + // All negative deltas (no underweight dst). + deltasAllNegative := map[string]math.Int{ + "a": math.NewInt(-10), + "b": math.NewInt(-5), + } + keys = sortedKeys(deltasAllNegative) + _, _, _, ok = k.PickBestRedelegation(deltasAllNegative, keys, blocked, math.ZeroInt()) + require.False(t, ok) +} + +// TestPickBestRedelegation_MultipleValidators picks the move with largest amount. +func TestPickBestRedelegation_MultipleValidators(t *testing.T) { + k := keeper.Keeper{} + deltas := map[string]math.Int{ + "src1": math.NewInt(-100), + "src2": math.NewInt(-20), + "dst1": math.NewInt(50), + "dst2": math.NewInt(60), + } + keys := sortedKeys(deltas) + blocked := make(map[string]map[string]struct{}) + maxMove := math.NewInt(1000) // effectively no cap + + src, dst, amt, ok := k.PickBestRedelegation(deltas, keys, blocked, maxMove) + require.True(t, ok) + // Best move is from src1 (overweight 100) to dst2 (underweight 60): amount 60. + require.Equal(t, "src1", src) + require.Equal(t, "dst2", dst) + require.True(t, amt.Equal(math.NewInt(60))) +} + +// --------------------------------------------------------------------------- +// 3.3 ComputeDeltas +// --------------------------------------------------------------------------- + +// TestComputeDeltas_Basic: target A=100 B=100, current A=120 B=80, totalStake=200; 50 bp threshold = 1. 
+func TestComputeDeltas_Basic(t *testing.T) { + _, k := testKeeperWithParams(t, "50", "0") + target := map[string]math.Int{"A": math.NewInt(100), "B": math.NewInt(100)} + current := map[string]math.Int{"A": math.NewInt(120), "B": math.NewInt(80)} + totalStake := math.NewInt(200) + + deltas, err := k.ComputeDeltas(target, current, totalStake, 50) + require.NoError(t, err) + require.Len(t, deltas, 2) + // delta = target - current: A = -20, B = +20. Threshold 200*50/10000 = 1; both |delta| >= 1. + require.True(t, deltas["A"].Equal(math.NewInt(-20))) + require.True(t, deltas["B"].Equal(math.NewInt(20))) +} + +// TestComputeDeltas_BelowThreshold: same target/current, high RebalanceThresholdBP so threshold > 20. +func TestComputeDeltas_BelowThreshold(t *testing.T) { + _, k := testKeeperWithParams(t, "1500", "0") // 15% -> threshold 200*1500/10000 = 30 + target := map[string]math.Int{"A": math.NewInt(100), "B": math.NewInt(100)} + current := map[string]math.Int{"A": math.NewInt(120), "B": math.NewInt(80)} + totalStake := math.NewInt(200) + + deltas, err := k.ComputeDeltas(target, current, totalStake, 1500) + require.NoError(t, err) + require.Len(t, deltas, 2) + // |delta| 20 < threshold 30 -> both zeroed. + require.True(t, deltas["A"].Equal(math.ZeroInt())) + require.True(t, deltas["B"].Equal(math.ZeroInt())) +} + +// TestComputeDeltas_UnionOfKeys: validator only in target or only in current; all keys present. +func TestComputeDeltas_UnionOfKeys(t *testing.T) { + _, k := testKeeperWithParams(t, "50", "0") + target := map[string]math.Int{"A": math.NewInt(100), "B": math.NewInt(100)} + current := map[string]math.Int{"A": math.NewInt(50), "C": math.NewInt(50)} + totalStake := math.NewInt(200) + + deltas, err := k.ComputeDeltas(target, current, totalStake, 50) + require.NoError(t, err) + require.Len(t, deltas, 3) + // A: 100-50=50; B: 100-0=100; C: 0-50=-50. Threshold 1; all non-zero. 
+ require.True(t, deltas["A"].Equal(math.NewInt(50))) + require.True(t, deltas["B"].Equal(math.NewInt(100))) + require.True(t, deltas["C"].Equal(math.NewInt(-50))) +} + +// TestComputeDeltas_TotalStakeZero: threshold = 0; deltas are not zeroed by threshold. +func TestComputeDeltas_TotalStakeZero(t *testing.T) { + _, k := testKeeperWithParams(t, "50", "0") + target := map[string]math.Int{"A": math.NewInt(0), "B": math.NewInt(0)} + current := map[string]math.Int{"A": math.NewInt(0), "B": math.NewInt(0)} + totalStake := math.ZeroInt() + + deltas, err := k.ComputeDeltas(target, current, totalStake, 50) + require.NoError(t, err) + require.Len(t, deltas, 2) + // threshold = 0; delta A = 0, B = 0 (and 0 is not < 0, so they stay 0). + require.True(t, deltas["A"].Equal(math.ZeroInt())) + require.True(t, deltas["B"].Equal(math.ZeroInt())) +} diff --git a/x/poolrebalancer/keeper/redelegation.go b/x/poolrebalancer/keeper/redelegation.go new file mode 100644 index 00000000..aabf4dd3 --- /dev/null +++ b/x/poolrebalancer/keeper/redelegation.go @@ -0,0 +1,215 @@ +package keeper + +import ( + "context" + "errors" + "fmt" + "strconv" + "time" + + "github.com/cosmos/evm/x/poolrebalancer/types" + + storetypes "cosmossdk.io/store/types" + "github.com/cosmos/cosmos-sdk/runtime" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// addPendingRedelegation records a redelegation until its completion time. +// It writes the primary record, a by-source index record, and appends to the completion-time queue. +func (k Keeper) addPendingRedelegation(ctx context.Context, del sdk.AccAddress, srcVal, dstVal sdk.ValAddress, coin sdk.Coin, completionTime time.Time) error { + store := k.storeService.OpenKVStore(ctx) + denom := coin.Denom + + // Primary key: merge if an entry already exists for the same (del, denom, src, dst, completion). 
+ primaryKey := types.GetPendingRedelegationKey(del, denom, srcVal, dstVal, completionTime) + var entry types.PendingRedelegation + if bz, err := store.Get(primaryKey); err == nil && bz != nil && len(bz) > 0 { + if err := k.cdc.Unmarshal(bz, &entry); err != nil { + return err + } + entry.Amount = entry.Amount.Add(coin) + } else { + entry = types.PendingRedelegation{ + DelegatorAddress: del.String(), + SrcValidatorAddress: srcVal.String(), + DstValidatorAddress: dstVal.String(), + Amount: coin, + CompletionTime: completionTime, + } + } + primaryBz := k.cdc.MustMarshal(&entry) + if err := store.Set(primaryKey, primaryBz); err != nil { + return err + } + + // Index by source validator; value is unused. + indexKey := types.GetPendingRedelegationBySrcIndexKey(srcVal, completionTime, denom, dstVal, del) + if err := store.Set(indexKey, []byte{}); err != nil { + return err + } + + // Append to the completion-time queue. + queueKey := types.GetPendingRedelegationQueueKey(completionTime) + var queued types.QueuedRedelegation + if bz, err := store.Get(queueKey); err == nil && bz != nil && len(bz) > 0 { + if err := k.cdc.Unmarshal(bz, &queued); err != nil { + return err + } + } + queued.Entries = append(queued.Entries, types.PendingRedelegation{ + DelegatorAddress: del.String(), + SrcValidatorAddress: srcVal.String(), + DstValidatorAddress: dstVal.String(), + Amount: coin, + CompletionTime: completionTime, + }) + queueBz := k.cdc.MustMarshal(&queued) + return store.Set(queueKey, queueBz) +} + +// HasImmatureRedelegationTo reports whether there's any in-flight redelegation to dstVal +// for the given (delegator, denom). This is used to prevent transitive redelegations. 
+func (k Keeper) HasImmatureRedelegationTo(ctx context.Context, del sdk.AccAddress, dstVal sdk.ValAddress, denom string) bool {
+	store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
+	// NOTE(review): the primary key layout is (del, denom, srcVal, dstVal, time) per
+	// GetPendingRedelegationKey, while this prefix is built as (del, denom, dstVal).
+	// Confirm GetPendingRedelegationPrefix really selects by the destination-validator
+	// slot and not the srcVal slot — TODO verify against the key helpers.
+	prefix := types.GetPendingRedelegationPrefix(del, denom, dstVal)
+	iter := storetypes.KVStorePrefixIterator(store, prefix)
+	defer iter.Close() //nolint:errcheck
+	// A single live entry under the prefix answers the question; no need to walk further.
+	return iter.Valid()
+}
+
+// CanBeginRedelegation performs local checks before calling the staking keeper.
+// It rejects self-redelegations, non-positive amounts, and transitive redelegations
+// (the source validator still being the target of an immature incoming redelegation).
+func (k Keeper) CanBeginRedelegation(ctx context.Context, del sdk.AccAddress, srcVal, dstVal sdk.ValAddress, coin sdk.Coin) bool {
+	if srcVal.Equals(dstVal) {
+		return false
+	}
+	if !coin.Amount.IsPositive() {
+		return false
+	}
+	if k.HasImmatureRedelegationTo(ctx, del, srcVal, coin.Denom) {
+		return false
+	}
+	return true
+}
+
+// BeginTrackedRedelegation converts coin into shares on the source validator, calls the
+// staking keeper's BeginRedelegation, and records a pending-redelegation entry keyed by
+// the completion time the staking keeper returns. It emits EventTypeRedelegationStarted
+// on success.
+//
+// The completion time comes exclusively from BeginRedelegation; the previous local
+// estimate derived from UnbondingTime was dead code (immediately overwritten by the
+// staking keeper's return value) and has been removed.
+func (k Keeper) BeginTrackedRedelegation(ctx context.Context, del sdk.AccAddress, srcVal, dstVal sdk.ValAddress, coin sdk.Coin) (completionTime time.Time, err error) {
+	sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+	srcValidator, err := k.stakingKeeper.GetValidator(ctx, srcVal)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("get source validator: %w", err)
+	}
+	shares, err := srcValidator.SharesFromTokens(coin.Amount)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("shares from tokens: %w", err)
+	}
+	if !shares.IsPositive() {
+		return time.Time{}, errors.New("shares amount is not positive")
+	}
+
+	completionTime, err = k.stakingKeeper.BeginRedelegation(ctx, del, srcVal, dstVal, shares)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("begin redelegation: %w", err)
+	}
+
+	if err := k.addPendingRedelegation(ctx, del, srcVal, dstVal, coin, completionTime); err != nil {
+		return time.Time{}, fmt.Errorf("add pending redelegation: %w", err)
+	}
+	if err := k.markCommunityPoolReconcileDirtyIfPoolDelegator(ctx, del); err != nil {
+		return time.Time{}, err
+	}
+
+	sdkCtx.EventManager().EmitEvent(
+		sdk.NewEvent(
+			types.EventTypeRedelegationStarted,
+			sdk.NewAttribute(types.AttributeKeyDelegator, del.String()),
+			sdk.NewAttribute(types.AttributeKeySrcValidator, srcVal.String()),
+			sdk.NewAttribute(types.AttributeKeyDstValidator, dstVal.String()),
+			sdk.NewAttribute(types.AttributeKeyAmount, coin.Amount.String()),
+			sdk.NewAttribute(types.AttributeKeyDenom, coin.Denom),
+			sdk.NewAttribute(types.AttributeKeyCompletionTime, completionTime.UTC().Format(time.RFC3339Nano)),
+		),
+	)
+
+	return completionTime, nil
+}
+
+// deletePendingRedelegation removes the primary and index records for a pending redelegation.
+// Deletes are idempotent: removing a missing key is a no-op.
+func (k Keeper) deletePendingRedelegation(ctx context.Context, entry types.PendingRedelegation, completion time.Time) error {
+	store := k.storeService.OpenKVStore(ctx)
+	del, err := sdk.AccAddressFromBech32(entry.DelegatorAddress)
+	if err != nil {
+		return err
+	}
+	srcVal, err := sdk.ValAddressFromBech32(entry.SrcValidatorAddress)
+	if err != nil {
+		return err
+	}
+	dstVal, err := sdk.ValAddressFromBech32(entry.DstValidatorAddress)
+	if err != nil {
+		return err
+	}
+	denom := entry.Amount.Denom
+
+	primaryKey := types.GetPendingRedelegationKey(del, denom, srcVal, dstVal, completion)
+	if err := store.Delete(primaryKey); err != nil {
+		return err
+	}
+	indexKey := types.GetPendingRedelegationBySrcIndexKey(srcVal, completion, denom, dstVal, del)
+	return store.Delete(indexKey)
+}
+
+// CompletePendingRedelegations removes matured redelegation records and their indexes.
+func (k Keeper) CompletePendingRedelegations(ctx context.Context) error { + sdkCtx := sdk.UnwrapSDKContext(ctx) + blockTime := sdkCtx.BlockTime() + completed := 0 + + store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + start := types.PendingRedelegationQueueKey + end := types.GetPendingRedelegationQueueKey(blockTime) + // Include keys with completionTime <= blockTime by using an exclusive end key immediately after end. + endExclusive := append(append([]byte{}, end...), 0xFF) + + iter := store.Iterator(start, endExclusive) + defer iter.Close() //nolint:errcheck + + for ; iter.Valid(); iter.Next() { + key := iter.Key() + completion, err := types.ParsePendingRedelegationQueueKey(key) + if err != nil { + return err + } + var queued types.QueuedRedelegation + if err := k.cdc.Unmarshal(iter.Value(), &queued); err != nil { + return err + } + for _, entry := range queued.Entries { + if err := k.deletePendingRedelegation(ctx, entry, completion); err != nil { + return err + } + completed++ + } + store.Delete(key) + } + + if completed > 0 { + sdkCtx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeRedelegationsCompleted, + sdk.NewAttribute(types.AttributeKeyCount, strconv.Itoa(completed)), + sdk.NewAttribute(types.AttributeKeyCompletionTime, blockTime.UTC().Format(time.RFC3339Nano)), + ), + ) + } + + return nil +} diff --git a/x/poolrebalancer/keeper/redelegation_test.go b/x/poolrebalancer/keeper/redelegation_test.go new file mode 100644 index 00000000..e9aa029b --- /dev/null +++ b/x/poolrebalancer/keeper/redelegation_test.go @@ -0,0 +1,126 @@ +package keeper + +import ( + "bytes" + "testing" + "time" + + "cosmossdk.io/math" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +func TestHasImmatureRedelegationTo_BlocksSrcWhenDstHasIncoming(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + ctx = ctx.WithBlockTime(time.Unix(2_000, 0)) + del := 
sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + srcVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + denom := "stake" + + entry := types.PendingRedelegation{ + DelegatorAddress: del.String(), + SrcValidatorAddress: srcVal.String(), + DstValidatorAddress: dstVal.String(), + Amount: sdk.NewCoin(denom, math.NewInt(100)), + CompletionTime: ctx.BlockTime().Add(time.Hour), + } + require.NoError(t, k.SetPendingRedelegation(ctx, entry)) + + require.True(t, k.HasImmatureRedelegationTo(ctx, del, dstVal, denom)) + + otherVal := sdk.ValAddress(bytes.Repeat([]byte{4}, 20)) + require.False(t, k.HasImmatureRedelegationTo(ctx, del, otherVal, denom)) +} + +func TestCompletePendingRedelegations_RemovesPrimaryIndexAndQueue(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + ctx = ctx.WithBlockTime(time.Unix(2_000, 0)) + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + srcVal := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + denom := "stake" + + completion := ctx.BlockTime().Add(-time.Second) + coin := sdk.NewCoin(denom, math.NewInt(10)) + entry := types.PendingRedelegation{ + DelegatorAddress: del.String(), + SrcValidatorAddress: srcVal.String(), + DstValidatorAddress: dstVal.String(), + Amount: coin, + CompletionTime: completion, + } + require.NoError(t, k.SetPendingRedelegation(ctx, entry)) + + primaryKey := types.GetPendingRedelegationKey(del, denom, srcVal, dstVal, completion) + indexKey := types.GetPendingRedelegationBySrcIndexKey(srcVal, completion, denom, dstVal, del) + queueKey := types.GetPendingRedelegationQueueKey(completion) + + store := k.storeService.OpenKVStore(ctx) + bz, err := store.Get(primaryKey) + require.NoError(t, err) + require.NotNil(t, bz) + + require.NoError(t, k.CompletePendingRedelegations(ctx)) + + bz, err = store.Get(primaryKey) + require.NoError(t, err) + require.Nil(t, bz) + + bz, err = store.Get(indexKey) + require.NoError(t, err) + 
require.Nil(t, bz) + + bz, err = store.Get(queueKey) + require.NoError(t, err) + require.Nil(t, bz) + + // Idempotency: running again should not error. + require.NoError(t, k.CompletePendingRedelegations(ctx)) +} + +func TestSetPendingRedelegation_DistinctSourcesDoNotMerge(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + + ctx = ctx.WithBlockTime(time.Unix(2_000, 0)) + del := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + srcA := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + srcB := sdk.ValAddress(bytes.Repeat([]byte{4}, 20)) + dstVal := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + denom := "stake" + completion := ctx.BlockTime().Add(time.Hour) + + entryA := types.PendingRedelegation{ + DelegatorAddress: del.String(), + SrcValidatorAddress: srcA.String(), + DstValidatorAddress: dstVal.String(), + Amount: sdk.NewCoin(denom, math.NewInt(10)), + CompletionTime: completion, + } + entryB := types.PendingRedelegation{ + DelegatorAddress: del.String(), + SrcValidatorAddress: srcB.String(), + DstValidatorAddress: dstVal.String(), + Amount: sdk.NewCoin(denom, math.NewInt(15)), + CompletionTime: completion, + } + require.NoError(t, k.SetPendingRedelegation(ctx, entryA)) + require.NoError(t, k.SetPendingRedelegation(ctx, entryB)) + + store := k.storeService.OpenKVStore(ctx) + keyA := types.GetPendingRedelegationKey(del, denom, srcA, dstVal, completion) + keyB := types.GetPendingRedelegationKey(del, denom, srcB, dstVal, completion) + + bzA, err := store.Get(keyA) + require.NoError(t, err) + require.NotNil(t, bzA) + + bzB, err := store.Get(keyB) + require.NoError(t, err) + require.NotNil(t, bzB) +} diff --git a/x/poolrebalancer/keeper/slash_snapshot.go b/x/poolrebalancer/keeper/slash_snapshot.go new file mode 100644 index 00000000..ad815aff --- /dev/null +++ b/x/poolrebalancer/keeper/slash_snapshot.go @@ -0,0 +1,133 @@ +package keeper + +import ( + "context" + "errors" + "fmt" + "sort" + "strings" + + "github.com/cosmos/evm/x/poolrebalancer/types" + + sdk 
"github.com/cosmos/cosmos-sdk/types"
+	distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+)
+
+// Sentinel distinguishing "snapshot never written this block" from "snapshot written but empty".
+var errMissingPreviousBlockSlashedValidatorsSnapshot = errors.New("poolrebalancer: missing previous-block slashed validators snapshot")
+
+// setPreviousBlockSlashedValidators persists the slashed-validator set to the module's
+// per-block transient store. Keys are sorted before joining so the stored bytes are
+// deterministic regardless of map iteration order.
+func (k Keeper) setPreviousBlockSlashedValidators(ctx context.Context, validators map[string]struct{}) error {
+	if k.transientKey == nil {
+		return errors.New("poolrebalancer: transient key is nil")
+	}
+
+	keys := make([]string, 0, len(validators))
+	for val := range validators {
+		keys = append(keys, val)
+	}
+	sort.Strings(keys)
+
+	sdkCtx := sdk.UnwrapSDKContext(ctx)
+	store := sdkCtx.TransientStore(k.transientKey)
+	// An empty set is stored as zero bytes, which getPreviousBlockSlashedValidators
+	// treats as "written but empty" — distinct from a nil (absent) value.
+	store.Set(types.PreviousBlockSlashedValidatorsTransientKey, []byte(strings.Join(keys, "\n")))
+	return nil
+}
+
+// getPreviousBlockSlashedValidators loads the snapshot written by
+// PreparePreviousBlockSlashedValidators. A nil store value means the snapshot was never
+// written this block and yields errMissingPreviousBlockSlashedValidatorsSnapshot.
+func (k Keeper) getPreviousBlockSlashedValidators(ctx context.Context) (map[string]struct{}, error) {
+	if k.transientKey == nil {
+		return nil, errors.New("poolrebalancer: transient key is nil")
+	}
+
+	sdkCtx := sdk.UnwrapSDKContext(ctx)
+	bz := sdkCtx.TransientStore(k.transientKey).Get(types.PreviousBlockSlashedValidatorsTransientKey)
+	if bz == nil {
+		return nil, fmt.Errorf("%w (PreparePreviousBlockSlashedValidators not run?)", errMissingPreviousBlockSlashedValidatorsSnapshot)
+	}
+	if len(bz) == 0 {
+		return map[string]struct{}{}, nil
+	}
+
+	lines := strings.Split(string(bz), "\n")
+	out := make(map[string]struct{}, len(lines))
+	for _, line := range lines {
+		if line == "" {
+			continue
+		}
+		// Defensive re-validation: the snapshot should only ever contain addresses this
+		// keeper wrote, so a parse failure indicates corruption rather than bad input.
+		if _, err := sdk.ValAddressFromBech32(line); err != nil {
+			return nil, fmt.Errorf("invalid slashed validator address in snapshot: %w", err)
+		}
+		out[line] = struct{}{}
+	}
+	return out, nil
+}
+
+// getPreviousBlockSlashedValidatorsOrEmpty preserves pre-existing test/helper flows that invoke
+// ProcessRebalance without a full BeginBlock pass by treating an absent snapshot as an empty set.
+func (k Keeper) getPreviousBlockSlashedValidatorsOrEmpty(ctx context.Context) (map[string]struct{}, error) {
+	slashed, err := k.getPreviousBlockSlashedValidators(ctx)
+	if err != nil {
+		if errors.Is(err, errMissingPreviousBlockSlashedValidatorsSnapshot) {
+			return map[string]struct{}{}, nil
+		}
+		return nil, err
+	}
+	return slashed, nil
+}
+
+// PreparePreviousBlockSlashedValidators snapshots relevant validators with slash events at height blockHeight-1.
+// It bounds work to validators relevant to poolrebalancer state: the pool delegator's current
+// delegations plus the current target bonded validator set. Slash data is read directly from the
+// distribution keeper via IterateValidatorSlashEventsBetween rather than going through query-layer
+// pagination/response allocation.
+func (k Keeper) PreparePreviousBlockSlashedValidators(ctx context.Context) error {
+	// No distribution keeper wired (e.g. minimal test setups): record an empty snapshot
+	// so downstream reads succeed instead of hitting the missing-snapshot sentinel.
+	if k.distrKeeper == nil {
+		return k.setPreviousBlockSlashedValidators(ctx, map[string]struct{}{})
+	}
+
+	sdkCtx := sdk.UnwrapSDKContext(ctx)
+	prevHeight := sdkCtx.BlockHeight() - 1
+	// At genesis-like heights there is no previous block to inspect.
+	if prevHeight <= 0 {
+		return k.setPreviousBlockSlashedValidators(ctx, map[string]struct{}{})
+	}
+
+	candidates := make(map[string]struct{})
+
+	poolDel, err := k.GetPoolDelegatorAddress(ctx)
+	if err != nil {
+		return err
+	}
+	if !poolDel.Empty() {
+		delegations, err := k.getAllDelegatorDelegations(ctx, poolDel)
+		if err != nil {
+			return err
+		}
+		for _, d := range delegations {
+			if _, err := sdk.ValAddressFromBech32(d.ValidatorAddress); err != nil {
+				return err
+			}
+			candidates[d.ValidatorAddress] = struct{}{}
+		}
+	}
+
+	targetVals, err := k.GetTargetBondedValidators(ctx)
+	if err != nil {
+		return err
+	}
+	for _, val := range targetVals {
+		candidates[val.String()] = struct{}{}
+	}
+
+	// Map iteration order is nondeterministic, but the output is a set, so the stored
+	// snapshot (sorted on write) remains consensus-deterministic.
+	slashed := make(map[string]struct{})
+	for valAddr := range candidates {
+		val, err := sdk.ValAddressFromBech32(valAddr)
+		if err != nil {
+			return err
+		}
+
+		k.distrKeeper.IterateValidatorSlashEventsBetween(ctx, val, 
uint64(prevHeight), uint64(prevHeight), func(height uint64, _ distributiontypes.ValidatorSlashEvent) (stop bool) { + slashed[valAddr] = struct{}{} + return true + }) + } + + return k.setPreviousBlockSlashedValidators(ctx, slashed) +} diff --git a/x/poolrebalancer/keeper/slash_snapshot_test.go b/x/poolrebalancer/keeper/slash_snapshot_test.go new file mode 100644 index 00000000..845af945 --- /dev/null +++ b/x/poolrebalancer/keeper/slash_snapshot_test.go @@ -0,0 +1,99 @@ +package keeper + +import ( + "bytes" + "context" + "testing" + + "github.com/stretchr/testify/require" + + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +type mockDistributionQuerier struct { + slashHeightsByValidator map[string]map[uint64]struct{} + queriedValidators []string +} + +func (m *mockDistributionQuerier) IterateValidatorSlashEventsBetween(_ context.Context, val sdk.ValAddress, startingHeight, endingHeight uint64, handler func(height uint64, event distributiontypes.ValidatorSlashEvent) (stop bool)) { + if m == nil { + return + } + valAddr := val.String() + m.queriedValidators = append(m.queriedValidators, valAddr) + if heights, ok := m.slashHeightsByValidator[valAddr]; ok { + for h := startingHeight; h <= endingHeight; h++ { + if _, slashed := heights[h]; slashed { + handler(h, distributiontypes.ValidatorSlashEvent{ + ValidatorPeriod: h, + Fraction: math.LegacyNewDec(1), + }) + return + } + } + } +} + +func TestPreparePreviousBlockSlashedValidators_WritesEmptyAtGenesisHeights(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + ctx = ctx.WithBlockHeight(1) + dq := &mockDistributionQuerier{} + k.distrKeeper = dq + + require.NoError(t, k.PreparePreviousBlockSlashedValidators(ctx)) + + slashed, err := k.getPreviousBlockSlashedValidators(ctx) + require.NoError(t, err) + require.Empty(t, 
slashed) + require.Empty(t, dq.queriedValidators, "genesis-like heights should not issue slash queries") +} + +func TestPreparePreviousBlockSlashedValidators_UsesPreviousHeightAndRelevantCandidates(t *testing.T) { + ctx, k, _ := newTestKeeper(t) + ctx = ctx.WithBlockHeight(10) + k.evmKeeper = &mockEVMKeeper{} + + poolDel := sdk.AccAddress(bytes.Repeat([]byte{1}, 20)) + targetA := sdk.ValAddress(bytes.Repeat([]byte{2}, 20)) + targetB := sdk.ValAddress(bytes.Repeat([]byte{3}, 20)) + delegatedOnly := sdk.ValAddress(bytes.Repeat([]byte{4}, 20)) + unrelated := sdk.ValAddress(bytes.Repeat([]byte{5}, 20)) + + params := types.DefaultParams() + params.PoolDelegatorAddress = poolDel.String() + params.MaxTargetValidators = 2 + require.NoError(t, k.SetParams(ctx, params)) + + sk := k.stakingKeeper.(*mockStakingKeeper) + sk.vals = []stakingtypes.Validator{ + {OperatorAddress: targetA.String(), Tokens: math.NewInt(100), DelegatorShares: math.LegacyNewDec(100)}, + {OperatorAddress: targetB.String(), Tokens: math.NewInt(90), DelegatorShares: math.LegacyNewDec(90)}, + } + sk.delegations = []stakingtypes.Delegation{ + {DelegatorAddress: poolDel.String(), ValidatorAddress: delegatedOnly.String(), Shares: math.LegacyNewDec(10)}, + } + + dq := &mockDistributionQuerier{ + slashHeightsByValidator: map[string]map[uint64]struct{}{ + targetB.String(): {9: {}}, + delegatedOnly.String(): {8: {}}, + unrelated.String(): {9: {}}, + }, + } + k.distrKeeper = dq + + require.NoError(t, k.PreparePreviousBlockSlashedValidators(ctx)) + + slashed, err := k.getPreviousBlockSlashedValidators(ctx) + require.NoError(t, err) + require.Equal(t, map[string]struct{}{ + targetB.String(): {}, + }, slashed) + + require.ElementsMatch(t, []string{delegatedOnly.String(), targetA.String(), targetB.String()}, dq.queriedValidators) +} diff --git a/x/poolrebalancer/keeper/test_helpers_test.go b/x/poolrebalancer/keeper/test_helpers_test.go new file mode 100644 index 00000000..0ccd4573 --- /dev/null +++ 
b/x/poolrebalancer/keeper/test_helpers_test.go @@ -0,0 +1,89 @@ +package keeper + +import ( + "bytes" + "context" + "testing" + + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/stretchr/testify/require" + + "github.com/cosmos/evm/x/poolrebalancer/types" +) + +// mockAccountKeeper is an in-memory auth stub for unit tests (e.g. user pubkey rejection). +type mockAccountKeeper struct { + accounts map[string]sdk.AccountI +} + +func newMockAccountKeeper() *mockAccountKeeper { + return &mockAccountKeeper{accounts: make(map[string]sdk.AccountI)} +} + +func (m *mockAccountKeeper) GetAccount(_ context.Context, addr sdk.AccAddress) sdk.AccountI { + if m == nil { + return nil + } + return m.accounts[addr.String()] +} + +func (m *mockAccountKeeper) SetAccount(_ context.Context, acc sdk.AccountI) { + m.accounts[acc.GetAddress().String()] = acc +} + +func (m *mockAccountKeeper) NewAccountWithAddress(_ context.Context, addr sdk.AccAddress) sdk.AccountI { + return authtypes.NewBaseAccountWithAddress(addr) +} + +// newTestKeeper returns a keeper with in-memory auth (mockAccountKeeper) and nil EVM. +// Before SetParams with a non-empty PoolDelegatorAddress, assign k.evmKeeper (e.g. &mockEVMKeeper{}) +// unless the test intentionally exercises validation failure or clears EVM after a successful SetParams. 
+func newTestKeeper(t *testing.T) (sdk.Context, Keeper, *mockAccountKeeper) { + t.Helper() + + storeKey := storetypes.NewKVStoreKey(types.ModuleName) + tKey := storetypes.NewTransientStoreKey("transient_test") + ctx := testutil.DefaultContext(storeKey, tKey) + + storeService := runtime.NewKVStoreService(storeKey) + cdc := moduletestutil.MakeTestEncodingConfig().Codec + stakingKeeper := &mockStakingKeeper{} + + authority := sdk.AccAddress(bytes.Repeat([]byte{9}, 20)) + mockAcc := newMockAccountKeeper() + k := NewKeeper(cdc, storeService, tKey, stakingKeeper, stakingKeeper, nil, authority, nil, mockAcc) + return ctx, k, mockAcc +} + +// newTestKeeperNilAuthAndEVM matches genesis-style wiring (no auth, no EVM). Non-empty +// pool_delegator_address cannot be persisted on this keeper. +func newTestKeeperNilAuthAndEVM(t *testing.T) (sdk.Context, Keeper) { + t.Helper() + + storeKey := storetypes.NewKVStoreKey(types.ModuleName) + tKey := storetypes.NewTransientStoreKey("transient_test") + ctx := testutil.DefaultContext(storeKey, tKey) + + storeService := runtime.NewKVStoreService(storeKey) + cdc := moduletestutil.MakeTestEncodingConfig().Codec + stakingKeeper := &mockStakingKeeper{} + authority := sdk.AccAddress(bytes.Repeat([]byte{9}, 20)) + k := NewKeeper(cdc, storeService, tKey, stakingKeeper, stakingKeeper, nil, authority, nil, nil) + return ctx, k +} + +func setPoolDelegatorForTest(t *testing.T, ctx sdk.Context, k *Keeper, poolDel sdk.AccAddress) { + t.Helper() + if k.evmKeeper == nil { + k.evmKeeper = &mockEVMKeeper{} + } + params := types.DefaultParams() + params.PoolDelegatorAddress = poolDel.String() + require.NoError(t, k.SetParams(ctx, params)) +} diff --git a/x/poolrebalancer/module.go b/x/poolrebalancer/module.go new file mode 100644 index 00000000..6b11cd5e --- /dev/null +++ b/x/poolrebalancer/module.go @@ -0,0 +1,149 @@ +package poolrebalancer + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + 
"github.com/spf13/cobra" + + abci "github.com/cometbft/cometbft/abci/types" + + "github.com/cosmos/evm/x/poolrebalancer/client/cli" + "github.com/cosmos/evm/x/poolrebalancer/keeper" + "github.com/cosmos/evm/x/poolrebalancer/types" + + "cosmossdk.io/core/appmodule" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" +) + +// ConsensusVersion defines the current module consensus version. +const ConsensusVersion = 1 + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} + _ module.HasABCIGenesis = AppModule{} + _ appmodule.AppModule = AppModule{} + _ appmodule.HasBeginBlocker = AppModule{} + _ appmodule.HasEndBlocker = AppModule{} +) + +// AppModuleBasic implements module.AppModuleBasic for the poolrebalancer module. +type AppModuleBasic struct{} + +func NewAppModuleBasic() AppModuleBasic { + return AppModuleBasic{} +} + +// Name returns the module name. +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the module's types with the LegacyAmino codec. +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterLegacyAminoCodec(cdc) +} + +// ConsensusVersion returns the consensus state-breaking version for the module. +func (AppModuleBasic) ConsensusVersion() uint64 { + return ConsensusVersion +} + +// RegisterInterfaces registers the module's interface types. +func (AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns default genesis state as raw JSON. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// ValidateGenesis validates the genesis state. 
+func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var gs types.GenesisState + if err := cdc.UnmarshalJSON(bz, &gs); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return gs.Validate() +} + +// RegisterGRPCGatewayRoutes registers the module's gRPC-gateway routes. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil { + panic(err) + } +} + +// GetTxCmd returns the root tx command (nil; no user-facing tx CLI). +func (AppModuleBasic) GetTxCmd() *cobra.Command { + return nil +} + +// GetQueryCmd returns the root query command. +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// AppModule implements module.AppModule for the poolrebalancer module. +type AppModule struct { + AppModuleBasic + keeper keeper.Keeper +} + +// NewAppModule returns a new AppModule. +func NewAppModule(k keeper.Keeper) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(), + keeper: k, + } +} + +// Name returns the module name. +func (am AppModule) Name() string { + return am.AppModuleBasic.Name() +} + +// RegisterServices registers the module's gRPC query and msg services. +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterQueryServer(cfg.QueryServer(), keeper.NewQueryServer(am.keeper)) + types.RegisterMsgServer(cfg.MsgServer(), &am.keeper) +} + +// BeginBlock runs the module BeginBlocker. +func (am AppModule) BeginBlock(ctx context.Context) error { + return BeginBlocker(sdk.UnwrapSDKContext(ctx), am.keeper) +} + +// EndBlock runs the module EndBlocker. +func (am AppModule) EndBlock(ctx context.Context) error { + return EndBlocker(sdk.UnwrapSDKContext(ctx), am.keeper) +} + +// InitGenesis initializes the module from genesis. 
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + cdc.MustUnmarshalJSON(gs, &genState) + InitGenesis(ctx, am.keeper, &genState) + return []abci.ValidatorUpdate{} +} + +// ExportGenesis exports the module state to genesis. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// IsAppModule implements appmodule.AppModule. +func (AppModule) IsAppModule() {} + +// IsOnePerModuleType implements depinject.OnePerModuleType. +func (AppModule) IsOnePerModuleType() {} diff --git a/x/poolrebalancer/types/codec.go b/x/poolrebalancer/types/codec.go new file mode 100644 index 00000000..2f920ec8 --- /dev/null +++ b/x/poolrebalancer/types/codec.go @@ -0,0 +1,39 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +var ( + amino = codec.NewLegacyAmino() + + // ModuleCdc is a module-local codec helper. + // Most state and service encoding uses the app's configured codec; this exists mainly for JSON contexts. + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) + + // AminoCdc supports amino JSON for legacy msg encoding. + AminoCdc = codec.NewAminoCodec(amino) //nolint:staticcheck +) + +const ( + updateParamsName = "cosmos/evm/x/poolrebalancer/MsgUpdateParams" +) + +func init() { + RegisterLegacyAminoCodec(amino) + amino.Seal() +} + +// RegisterInterfaces registers the module's interfaces with the registry. 
+func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgUpdateParams{}) + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} + +// RegisterLegacyAminoCodec registers the module's types with the LegacyAmino codec. +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgUpdateParams{}, updateParamsName, nil) +} diff --git a/x/poolrebalancer/types/communitypool_abi.go b/x/poolrebalancer/types/communitypool_abi.go new file mode 100644 index 00000000..19d63c2e --- /dev/null +++ b/x/poolrebalancer/types/communitypool_abi.go @@ -0,0 +1,26 @@ +package types + +import ( + "bytes" + + "github.com/ethereum/go-ethereum/accounts/abi" + + _ "embed" +) + +var ( + //go:embed communitypool_abi.json + communityPoolABIBz []byte + + // CommunityPoolABI contains the minimal ABI for pool automation: stake, harvest, + // reconcileTotalStaked, and view getters for totalUnits / totalStaked. + CommunityPoolABI abi.ABI +) + +func init() { + var err error + CommunityPoolABI, err = abi.JSON(bytes.NewReader(communityPoolABIBz)) + if err != nil { + panic(err) + } +} diff --git a/x/poolrebalancer/types/communitypool_abi.json b/x/poolrebalancer/types/communitypool_abi.json new file mode 100644 index 00000000..fcdc517a --- /dev/null +++ b/x/poolrebalancer/types/communitypool_abi.json @@ -0,0 +1,67 @@ +[ + { + "inputs": [], + "name": "stake", + "outputs": [ + { + "internalType": "uint256", + "name": "delegatedAmount", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "harvest", + "outputs": [ + { + "internalType": "uint256", + "name": "harvestedAmount", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "newTotalStaked", + "type": "uint256" + } + ], + "name": "reconcileTotalStaked", + "outputs": [], + "stateMutability": 
"nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "totalStaked", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalUnits", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + } +] diff --git a/x/poolrebalancer/types/communitypool_abi_test.go b/x/poolrebalancer/types/communitypool_abi_test.go new file mode 100644 index 00000000..9623e8ac --- /dev/null +++ b/x/poolrebalancer/types/communitypool_abi_test.go @@ -0,0 +1,32 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCommunityPoolABI_MethodsPresent(t *testing.T) { + stakeMethod, ok := CommunityPoolABI.Methods["stake"] + require.True(t, ok) + require.Empty(t, stakeMethod.Inputs) + + harvestMethod, ok := CommunityPoolABI.Methods["harvest"] + require.True(t, ok) + require.Empty(t, harvestMethod.Inputs) + + reconcileMethod, ok := CommunityPoolABI.Methods["reconcileTotalStaked"] + require.True(t, ok) + require.Len(t, reconcileMethod.Inputs, 1) + require.Equal(t, "uint256", reconcileMethod.Inputs[0].Type.String()) + + totalStakedMethod, ok := CommunityPoolABI.Methods["totalStaked"] + require.True(t, ok) + require.Empty(t, totalStakedMethod.Inputs) + require.Equal(t, "view", totalStakedMethod.StateMutability) + + totalUnitsMethod, ok := CommunityPoolABI.Methods["totalUnits"] + require.True(t, ok) + require.Empty(t, totalUnitsMethod.Inputs) + require.Equal(t, "view", totalUnitsMethod.StateMutability) +} diff --git a/x/poolrebalancer/types/errors.go b/x/poolrebalancer/types/errors.go new file mode 100644 index 00000000..7b164168 --- /dev/null +++ b/x/poolrebalancer/types/errors.go @@ -0,0 +1,14 @@ +package types + +import ( + "cosmossdk.io/errors" +) + +// Sentinel errors for the poolrebalancer module. 
+var ( + ErrInvalidPoolDelegator = errors.Register(ModuleName, 1, "pool delegator address not set or invalid") + ErrTransitiveRedelegation = errors.Register(ModuleName, 2, "redelegation blocked: immature redelegation to source validator") + ErrSameValidator = errors.Register(ModuleName, 3, "source and destination validator cannot be the same") + ErrInvalidAmount = errors.Register(ModuleName, 4, "amount must be positive") + ErrNoDelegation = errors.Register(ModuleName, 5, "no delegation found for delegator and validator") +) diff --git a/x/poolrebalancer/types/events.go b/x/poolrebalancer/types/events.go new file mode 100644 index 00000000..2883d3f7 --- /dev/null +++ b/x/poolrebalancer/types/events.go @@ -0,0 +1,21 @@ +package types + +const ( + // Event types. + EventTypeRebalanceSummary = "rebalance_summary" + EventTypeRedelegationStarted = "redelegation_started" + EventTypeRedelegationFailed = "redelegation_failed" + EventTypeRedelegationsCompleted = "redelegations_completed" + + // Common attributes. + AttributeKeyDelegator = "delegator" + AttributeKeyValidator = "validator" + AttributeKeySrcValidator = "src_validator" + AttributeKeyDstValidator = "dst_validator" + AttributeKeyAmount = "amount" + AttributeKeyDenom = "denom" + AttributeKeyCompletionTime = "completion_time" + AttributeKeyCount = "count" + AttributeKeyOpsDone = "ops_done" + AttributeKeyReason = "reason" +) diff --git a/x/poolrebalancer/types/helpers.go b/x/poolrebalancer/types/helpers.go new file mode 100644 index 00000000..8bffedf7 --- /dev/null +++ b/x/poolrebalancer/types/helpers.go @@ -0,0 +1,93 @@ +package types + +import ( + "fmt" + + "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// DefaultParams returns the default module parameters. 
+func DefaultParams() Params { + return Params{ + PoolDelegatorAddress: "", // empty = rebalancer disabled until set + MaxTargetValidators: uint32(30), + RebalanceThresholdBp: uint32(50), // 0.5% + MaxOpsPerBlock: uint32(5), + MaxMovePerOp: math.ZeroInt(), // 0 means no cap + } +} + +// Validate runs stateless checks only. For pool_delegator_address that means Bech32 form when +// non-empty—no EVM IsContract, no auth/account checks. User accounts, contract proof, and +// bootstrap ordering are enforced in keeper.validatePoolDelegatorAddress (via SetParams). +func (p Params) Validate() error { + if p.PoolDelegatorAddress != "" { + if _, err := sdk.AccAddressFromBech32(p.PoolDelegatorAddress); err != nil { + return fmt.Errorf("invalid pool_delegator_address: %w", err) + } + } + if p.MaxTargetValidators == 0 { + return fmt.Errorf("max_target_validators must be positive") + } + if p.RebalanceThresholdBp > 10_000 { + return fmt.Errorf("rebalance_threshold_bp cannot exceed 10000") + } + if p.MaxOpsPerBlock == 0 { + return fmt.Errorf("max_ops_per_block must be positive") + } + if !p.MaxMovePerOp.IsNil() && p.MaxMovePerOp.IsNegative() { + return fmt.Errorf("max_move_per_op cannot be negative") + } + return nil +} + +// DefaultGenesisState returns a default genesis state. +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + } +} + +// Validate validates a pending redelegation record (e.g. for genesis import). 
+func (pr PendingRedelegation) Validate() error { + if _, err := sdk.AccAddressFromBech32(pr.DelegatorAddress); err != nil { + return fmt.Errorf("invalid delegator_address: %w", err) + } + srcVal, err := sdk.ValAddressFromBech32(pr.SrcValidatorAddress) + if err != nil { + return fmt.Errorf("invalid src_validator_address: %w", err) + } + dstVal, err := sdk.ValAddressFromBech32(pr.DstValidatorAddress) + if err != nil { + return fmt.Errorf("invalid dst_validator_address: %w", err) + } + if srcVal.Equals(dstVal) { + return fmt.Errorf("src_validator_address and dst_validator_address must differ") + } + if err := pr.Amount.Validate(); err != nil { + return fmt.Errorf("invalid amount: %w", err) + } + if !pr.Amount.IsPositive() { + return fmt.Errorf("amount must be positive") + } + if pr.CompletionTime.IsZero() { + return fmt.Errorf("completion_time must be set") + } + return nil +} + +// Validate checks genesis params using the same stateless rules as Params.Validate; pool +// delegator safety still depends on keeper validation when InitGenesis calls SetParams. 
+func (gs *GenesisState) Validate() error { + if err := gs.Params.Validate(); err != nil { + return err + } + for i, pr := range gs.PendingRedelegations { + if err := pr.Validate(); err != nil { + return fmt.Errorf("pending_redelegations[%d]: %w", i, err) + } + } + return nil +} diff --git a/x/poolrebalancer/types/helpers_test.go b/x/poolrebalancer/types/helpers_test.go new file mode 100644 index 00000000..49b5703d --- /dev/null +++ b/x/poolrebalancer/types/helpers_test.go @@ -0,0 +1,64 @@ +package types + +import ( + "bytes" + "testing" + + "cosmossdk.io/math" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func TestParamsValidate_RejectsThresholdAbove10000(t *testing.T) { + p := DefaultParams() + p.RebalanceThresholdBp = 10001 + + err := p.Validate() + require.Error(t, err) + require.Contains(t, err.Error(), "rebalance_threshold_bp") +} + +func TestParamsValidate_RejectsNegativeMaxMovePerOp(t *testing.T) { + p := DefaultParams() + p.MaxMovePerOp = math.NewInt(-1) + + err := p.Validate() + require.Error(t, err) + require.Contains(t, err.Error(), "max_move_per_op") +} + +func TestParamsValidate_RejectsInvalidPoolDelegatorAddress(t *testing.T) { + p := DefaultParams() + p.PoolDelegatorAddress = "not-a-valid-bech32" + + err := p.Validate() + require.Error(t, err) + require.Contains(t, err.Error(), "pool_delegator_address") +} + +// Well-formed Bech32 is enough for Params.Validate; contract-only and user-pubkey rules are not applied here. 
func TestParamsValidate_AcceptsWellFormedBech32PoolDelegatorOnly(t *testing.T) {
	p := DefaultParams()
	// 20 repeated bytes form a syntactically valid account address.
	p.PoolDelegatorAddress = sdk.AccAddress(bytes.Repeat([]byte{0x42}, 20)).String()

	require.NoError(t, p.Validate())
}

func TestParamsValidate_RejectsZeroMaxOpsPerBlock(t *testing.T) {
	p := DefaultParams()
	p.MaxOpsPerBlock = 0

	err := p.Validate()
	require.Error(t, err)
	require.Contains(t, err.Error(), "max_ops_per_block")
}

func TestParamsValidate_RejectsZeroMaxTargetValidators(t *testing.T) {
	p := DefaultParams()
	p.MaxTargetValidators = 0

	err := p.Validate()
	require.Error(t, err)
	require.Contains(t, err.Error(), "max_target_validators")
}
diff --git a/x/poolrebalancer/types/interfaces.go b/x/poolrebalancer/types/interfaces.go
new file mode 100644
index 00000000..fe4a4da7
--- /dev/null
+++ b/x/poolrebalancer/types/interfaces.go
@@ -0,0 +1,65 @@
package types

import (
	"context"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"

	evmtypes "github.com/cosmos/evm/x/vm/types"

	sdkmath "cosmossdk.io/math"
	sdk "github.com/cosmos/cosmos-sdk/types"
	distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
)

// StakingKeeper defines the subset of staking keeper methods used by poolrebalancer.
// These are consumer-side interfaces: they narrow the concrete SDK keepers to
// exactly the methods this module calls, which keeps wiring explicit and makes
// the keeper mockable in tests.
type StakingKeeper interface {
	GetBondedValidatorsByPower(ctx context.Context) ([]stakingtypes.Validator, error)
	// maxRetrieve bounds the number of delegations returned.
	GetDelegatorDelegations(ctx context.Context, delegator sdk.AccAddress, maxRetrieve uint16) ([]stakingtypes.Delegation, error)
	GetValidator(ctx context.Context, addr sdk.ValAddress) (stakingtypes.Validator, error)
	GetDelegation(ctx context.Context, delegatorAddr sdk.AccAddress, valAddr sdk.ValAddress) (stakingtypes.Delegation, error)
	BeginRedelegation(ctx context.Context, delAddr sdk.AccAddress, valSrcAddr, valDstAddr sdk.ValAddress, sharesAmount sdkmath.LegacyDec) (completionTime time.Time, err error)
	UnbondingTime(ctx context.Context) (time.Duration, error)
	BondDenom(ctx context.Context) (string, error)
}

// StakingQuerier defines the subset of staking query methods used by poolrebalancer.
// Takes the gRPC request type, so pagination options travel with the request.
type StakingQuerier interface {
	DelegatorDelegations(ctx context.Context, req *stakingtypes.QueryDelegatorDelegationsRequest) (*stakingtypes.QueryDelegatorDelegationsResponse, error)
}

// DistributionKeeper defines the subset of distribution keeper methods used by poolrebalancer.
type DistributionKeeper interface {
	// handler returning stop=true ends the iteration early.
	IterateValidatorSlashEventsBetween(
		ctx context.Context,
		val sdk.ValAddress,
		startingHeight, endingHeight uint64,
		handler func(height uint64, event distributiontypes.ValidatorSlashEvent) (stop bool),
	)
}

// EVMKeeper defines the subset of vm keeper methods used by poolrebalancer.
type EVMKeeper interface {
	// NOTE(review): the parameter named abi shadows the imported abi package
	// inside implementations; consider renaming to contractABI when convenient.
	CallEVM(
		ctx sdk.Context,
		abi abi.ABI,
		from, contract common.Address,
		commit bool,
		gasCap *big.Int,
		method string,
		args ...any,
	) (*evmtypes.MsgEthereumTxResponse, error)
	// IsContract reports whether the address holds non-delegated EVM bytecode (see x/vm/keeper.IsContract).
	IsContract(ctx sdk.Context, address common.Address) bool
}

// AccountKeeper defines the subset of auth keeper methods used by poolrebalancer.
type AccountKeeper interface {
	GetAccount(ctx context.Context, addr sdk.AccAddress) sdk.AccountI
	SetAccount(ctx context.Context, acc sdk.AccountI)
	NewAccountWithAddress(ctx context.Context, addr sdk.AccAddress) sdk.AccountI
}
diff --git a/x/poolrebalancer/types/keys.go b/x/poolrebalancer/types/keys.go
new file mode 100644
index 00000000..e9b1e1cd
--- /dev/null
+++ b/x/poolrebalancer/types/keys.go
@@ -0,0 +1,112 @@
package types

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/address"
	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
)

const (
	// ModuleName is the name of the poolrebalancer module (used in store keys and routing).
	ModuleName = "poolrebalancer"

	// StoreKey is the default store key for the poolrebalancer module (same as ModuleName).
	StoreKey = ModuleName

	// TransientStoreKey holds per-block scratch data (e.g. slash snapshots for EndBlock use).
	TransientStoreKey = "transient_poolrebalancer"

	// RouterKey is the top-level router key for the module.
	RouterKey = ModuleName
)

var (
	// PreviousBlockSlashedValidatorsTransientKey stores the BeginBlock snapshot of relevant validators
	// with slash events at height blockHeight-1 for EndBlock rebalance use.
	PreviousBlockSlashedValidatorsTransientKey = []byte{0x02}
)

// Store key prefixes (single-byte prefixes).
// NOTE: the byte values below are part of persisted state; never renumber them.
var (
	// ModuleEVMAddress is the EVM address of the poolrebalancer module account.
	// Operators should set this as CommunityPool automationCaller for EndBlock automation.
	ModuleEVMAddress common.Address

	ParamsKey = []byte{0x01} // module params

	// Pending redelegation tracking.
	// Primary key: (delegator, denom, dstValidator, srcValidator, completionTime)
	PendingRedelegationKey = []byte{0x11}
	// Index by source validator: (srcValidator, completionTime, denom, dstValidator, delegator)
	PendingRedelegationBySrcIndexKey = []byte{0x12}
	// Queue by completion time: completionTime -> list of pending redelegation entries
	PendingRedelegationQueueKey = []byte{0x13}

	// When set, EndBlock should run CommunityPool total-staked reconcile (also triggered on periodic sweep).
	CommunityPoolReconcileDirtyKey = []byte{0x31}
)

func init() {
	// Keep derivation aligned with x/auth module account bytes -> EVM address mapping.
	ModuleEVMAddress = common.BytesToAddress(authtypes.NewModuleAddress(ModuleName).Bytes())
}

// GetPendingRedelegationKey returns the primary key for a pending redelegation.
// Key format: prefix | lengthPrefixed(delegator) | lengthPrefixed(denom) | lengthPrefixed(dstValidator) | lengthPrefixed(srcValidator) | completionTime.
// NOTE(review): address.MustLengthPrefix panics for segments longer than 255
// bytes — assumes denoms respect SDK denom length rules; confirm upstream.
func GetPendingRedelegationKey(del sdk.AccAddress, denom string, srcVal, dstVal sdk.ValAddress, completion time.Time) []byte {
	key := make([]byte, 0)
	key = append(key, PendingRedelegationKey...)
	key = append(key, address.MustLengthPrefix(del)...)
	key = append(key, address.MustLengthPrefix([]byte(denom))...)
	key = append(key, address.MustLengthPrefix(dstVal)...)
	key = append(key, address.MustLengthPrefix(srcVal)...)
	// FormatTimeBytes yields a sortable fixed-layout encoding, so keys order by time.
	key = append(key, sdk.FormatTimeBytes(completion)...)
	return key
}

// GetPendingRedelegationBySrcIndexKey returns the index key for lookup by source validator.
// Key format: prefix | lengthPrefixed(srcValidator) | lengthPrefixed(completionTime) | lengthPrefixed(denom) | lengthPrefixed(dstVal) | lengthPrefixed(delegator).
func GetPendingRedelegationBySrcIndexKey(srcVal sdk.ValAddress, completion time.Time, denom string, dstVal sdk.ValAddress, del sdk.AccAddress) []byte {
	key := make([]byte, 0)
	key = append(key, PendingRedelegationBySrcIndexKey...)
	key = append(key, address.MustLengthPrefix(srcVal)...)
	key = append(key, address.MustLengthPrefix(sdk.FormatTimeBytes(completion))...)
	key = append(key, address.MustLengthPrefix([]byte(denom))...)
	key = append(key, address.MustLengthPrefix(dstVal)...)
	key = append(key, address.MustLengthPrefix(del)...)
	return key
}

// GetPendingRedelegationQueueKey returns the queue key for a given completion time.
// Used to iterate pending redelegations that mature at or before a given time.
func GetPendingRedelegationQueueKey(completion time.Time) []byte {
	key := make([]byte, 0)
	key = append(key, PendingRedelegationQueueKey...)
	key = append(key, sdk.FormatTimeBytes(completion)...)
	return key
}

// ParsePendingRedelegationQueueKey parses the completion time from a pending redelegation queue key.
// Key format: PendingRedelegationQueueKey (0x13) + FormatTimeBytes(completion).
func ParsePendingRedelegationQueueKey(key []byte) (time.Time, error) {
	// Reject keys that are only the prefix (or shorter): no time payload present.
	if len(key) <= len(PendingRedelegationQueueKey) {
		return time.Time{}, fmt.Errorf("invalid pending redelegation queue key length")
	}
	return sdk.ParseTimeBytes(key[len(PendingRedelegationQueueKey):])
}

// GetPendingRedelegationPrefix returns the key prefix for (delegator, denom, dstValidator).
// Used by HasImmatureRedelegationTo to prefix-scan all completion times for this triple.
func GetPendingRedelegationPrefix(del sdk.AccAddress, denom string, dstVal sdk.ValAddress) []byte {
	key := make([]byte, 0)
	key = append(key, PendingRedelegationKey...)
	key = append(key, address.MustLengthPrefix(del)...)
	key = append(key, address.MustLengthPrefix([]byte(denom))...)
	key = append(key, address.MustLengthPrefix(dstVal)...)
	return key
}
diff --git a/x/poolrebalancer/types/keys_test.go b/x/poolrebalancer/types/keys_test.go
new file mode 100644
index 00000000..1574047f
--- /dev/null
+++ b/x/poolrebalancer/types/keys_test.go
@@ -0,0 +1,15 @@
package types

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/require"

	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
)

// Pins the module-account -> EVM address derivation performed in keys.go init().
func TestModuleEVMAddress_Derivation(t *testing.T) {
	expected := common.BytesToAddress(authtypes.NewModuleAddress(ModuleName).Bytes())
	require.Equal(t, expected, ModuleEVMAddress)
}
diff --git a/x/poolrebalancer/types/msg.go b/x/poolrebalancer/types/msg.go
new file mode 100644
index 00000000..5d4f2eeb
--- /dev/null
+++ b/x/poolrebalancer/types/msg.go
@@ -0,0 +1,22 @@
package types

import (
	errorsmod "cosmossdk.io/errors"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

// Compile-time assertion that MsgUpdateParams satisfies sdk.Msg.
var _ sdk.Msg = &MsgUpdateParams{}

// ValidateBasic validates the message: the authority must be a valid Bech32
// account address and the embedded Params must pass their stateless checks.
func (m *MsgUpdateParams) ValidateBasic() error {
	if _, err := sdk.AccAddressFromBech32(m.Authority); err != nil {
		return errorsmod.Wrap(err, "invalid authority address")
	}
	return m.Params.Validate()
}

// GetSignBytes implements the LegacyMsg interface (amino JSON sign bytes).
func (m MsgUpdateParams) GetSignBytes() []byte {
	return AminoCdc.MustMarshalJSON(&m)
}
diff --git a/x/poolrebalancer/types/poolrebalancer.pb.go b/x/poolrebalancer/types/poolrebalancer.pb.go
new file mode 100644
index 00000000..76c8f82d
--- /dev/null
+++ b/x/poolrebalancer/types/poolrebalancer.pb.go
@@ -0,0 +1,1223 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: cosmos/poolrebalancer/v1/poolrebalancer.proto + +package types + +import ( + cosmossdk_io_math "cosmossdk.io/math" + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "google.golang.org/protobuf/types/known/timestamppb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the parameters for the poolrebalancer module. +type Params struct { + // pool_delegator_address is the account whose stake is rebalanced. + PoolDelegatorAddress string `protobuf:"bytes,1,opt,name=pool_delegator_address,json=poolDelegatorAddress,proto3" json:"pool_delegator_address,omitempty"` + // max_target_validators caps the bonded validator set size (top N by power). + MaxTargetValidators uint32 `protobuf:"varint,2,opt,name=max_target_validators,json=maxTargetValidators,proto3" json:"max_target_validators,omitempty"` + // rebalance_threshold_bp is the drift threshold in basis points. + RebalanceThresholdBp uint32 `protobuf:"varint,3,opt,name=rebalance_threshold_bp,json=rebalanceThresholdBp,proto3" json:"rebalance_threshold_bp,omitempty"` + // max_ops_per_block caps redelegation operations per block. + MaxOpsPerBlock uint32 `protobuf:"varint,4,opt,name=max_ops_per_block,json=maxOpsPerBlock,proto3" json:"max_ops_per_block,omitempty"` + // max_move_per_op caps the amount moved per operation (0 = no cap). 
+ MaxMovePerOp cosmossdk_io_math.Int `protobuf:"bytes,5,opt,name=max_move_per_op,json=maxMovePerOp,proto3,customtype=cosmossdk.io/math.Int" json:"max_move_per_op"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_3fcdfba81f65d424, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +// PendingRedelegation is an in-flight redelegation tracked for transitive redelegation safety. 
+type PendingRedelegation struct { + DelegatorAddress string `protobuf:"bytes,1,opt,name=delegator_address,json=delegatorAddress,proto3" json:"delegator_address,omitempty"` + SrcValidatorAddress string `protobuf:"bytes,2,opt,name=src_validator_address,json=srcValidatorAddress,proto3" json:"src_validator_address,omitempty"` + DstValidatorAddress string `protobuf:"bytes,3,opt,name=dst_validator_address,json=dstValidatorAddress,proto3" json:"dst_validator_address,omitempty"` + Amount types.Coin `protobuf:"bytes,4,opt,name=amount,proto3" json:"amount"` + CompletionTime time.Time `protobuf:"bytes,5,opt,name=completion_time,json=completionTime,proto3,stdtime" json:"completion_time"` +} + +func (m *PendingRedelegation) Reset() { *m = PendingRedelegation{} } +func (m *PendingRedelegation) String() string { return proto.CompactTextString(m) } +func (*PendingRedelegation) ProtoMessage() {} +func (*PendingRedelegation) Descriptor() ([]byte, []int) { + return fileDescriptor_3fcdfba81f65d424, []int{1} +} +func (m *PendingRedelegation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PendingRedelegation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PendingRedelegation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PendingRedelegation) XXX_Merge(src proto.Message) { + xxx_messageInfo_PendingRedelegation.Merge(m, src) +} +func (m *PendingRedelegation) XXX_Size() int { + return m.Size() +} +func (m *PendingRedelegation) XXX_DiscardUnknown() { + xxx_messageInfo_PendingRedelegation.DiscardUnknown(m) +} + +var xxx_messageInfo_PendingRedelegation proto.InternalMessageInfo + +// QueuedRedelegation groups redelegations that share the same completion time. 
+type QueuedRedelegation struct { + Entries []PendingRedelegation `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` +} + +func (m *QueuedRedelegation) Reset() { *m = QueuedRedelegation{} } +func (m *QueuedRedelegation) String() string { return proto.CompactTextString(m) } +func (*QueuedRedelegation) ProtoMessage() {} +func (*QueuedRedelegation) Descriptor() ([]byte, []int) { + return fileDescriptor_3fcdfba81f65d424, []int{2} +} +func (m *QueuedRedelegation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueuedRedelegation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueuedRedelegation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueuedRedelegation) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueuedRedelegation.Merge(m, src) +} +func (m *QueuedRedelegation) XXX_Size() int { + return m.Size() +} +func (m *QueuedRedelegation) XXX_DiscardUnknown() { + xxx_messageInfo_QueuedRedelegation.DiscardUnknown(m) +} + +var xxx_messageInfo_QueuedRedelegation proto.InternalMessageInfo + +// GenesisState defines the poolrebalancer module's genesis state. +type GenesisState struct { + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` + // pending_redelegations allow restoring in-flight state on restart. + // They are optional for initial deployments. 
+ PendingRedelegations []PendingRedelegation `protobuf:"bytes,2,rep,name=pending_redelegations,json=pendingRedelegations,proto3" json:"pending_redelegations"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_3fcdfba81f65d424, []int{3} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Params)(nil), "cosmos.poolrebalancer.v1.Params") + proto.RegisterType((*PendingRedelegation)(nil), "cosmos.poolrebalancer.v1.PendingRedelegation") + proto.RegisterType((*QueuedRedelegation)(nil), "cosmos.poolrebalancer.v1.QueuedRedelegation") + proto.RegisterType((*GenesisState)(nil), "cosmos.poolrebalancer.v1.GenesisState") +} + +func init() { + proto.RegisterFile("cosmos/poolrebalancer/v1/poolrebalancer.proto", fileDescriptor_3fcdfba81f65d424) +} + +var fileDescriptor_3fcdfba81f65d424 = []byte{ + // 638 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcf, 0x6e, 0xd3, 0x4c, + 0x14, 0xc5, 0xe3, 0x24, 0x5f, 0xda, 0x6f, 0x52, 0xfa, 0xc7, 0x4d, 0x21, 0x54, 0xc2, 0xa9, 0xb2, + 0x2a, 0x82, 0xda, 0x4a, 0x40, 0x62, 0x87, 0x44, 
0xa8, 0x84, 0xa8, 0x54, 0x35, 0x98, 0x8a, 0x05, + 0x1b, 0x6b, 0x6c, 0xdf, 0x3a, 0x56, 0x3d, 0x9e, 0xd1, 0xcc, 0xd8, 0x0a, 0x6f, 0xd1, 0x67, 0x61, + 0xc5, 0x23, 0x74, 0xd9, 0x0d, 0x12, 0x62, 0x51, 0xa0, 0x7d, 0x11, 0x34, 0x63, 0xc7, 0x2d, 0x6d, + 0xca, 0x82, 0x9d, 0x33, 0xe7, 0x9e, 0x7b, 0x67, 0x7e, 0x73, 0x32, 0x68, 0x27, 0xa0, 0x82, 0x50, + 0xe1, 0x30, 0x4a, 0x13, 0x0e, 0x3e, 0x4e, 0x70, 0x1a, 0x00, 0x77, 0xf2, 0xc1, 0x8d, 0x15, 0x9b, + 0x71, 0x2a, 0xa9, 0xd9, 0x2d, 0xca, 0xed, 0x1b, 0x62, 0x3e, 0xd8, 0xb4, 0xca, 0x46, 0x3e, 0x16, + 0xe0, 0xe4, 0x03, 0x1f, 0x24, 0x1e, 0x38, 0x01, 0x8d, 0xd3, 0xc2, 0xb9, 0xd9, 0x89, 0x68, 0x44, + 0xf5, 0xa7, 0xa3, 0xbe, 0xca, 0xd5, 0x5e, 0x44, 0x69, 0x94, 0x80, 0xa3, 0x7f, 0xf9, 0xd9, 0x91, + 0x23, 0x63, 0x02, 0x42, 0x62, 0xc2, 0x8a, 0x82, 0xfe, 0x97, 0x3a, 0x6a, 0x8d, 0x31, 0xc7, 0x44, + 0x98, 0xcf, 0xd1, 0x7d, 0x35, 0xd6, 0x0b, 0x21, 0x81, 0x08, 0x4b, 0xca, 0x3d, 0x1c, 0x86, 0x1c, + 0x84, 0xe8, 0x1a, 0x5b, 0xc6, 0xf6, 0xff, 0x6e, 0x47, 0xa9, 0xbb, 0x33, 0xf1, 0x55, 0xa1, 0x99, + 0x43, 0xb4, 0x41, 0xf0, 0xd4, 0x93, 0x98, 0x47, 0x20, 0xbd, 0x1c, 0x27, 0x71, 0xa8, 0x64, 0xd1, + 0xad, 0x6f, 0x19, 0xdb, 0xf7, 0xdc, 0x75, 0x82, 0xa7, 0x87, 0x5a, 0xfb, 0x50, 0x49, 0x6a, 0x52, + 0x75, 0x38, 0x4f, 0x4e, 0x38, 0x88, 0x09, 0x4d, 0x42, 0xcf, 0x67, 0xdd, 0x86, 0x36, 0x75, 0x2a, + 0xf5, 0x70, 0x26, 0x8e, 0x98, 0xf9, 0x18, 0xad, 0xa9, 0x49, 0x94, 0x09, 0x8f, 0x01, 0xf7, 0xfc, + 0x84, 0x06, 0xc7, 0xdd, 0xa6, 0x36, 0x2c, 0x13, 0x3c, 0x3d, 0x60, 0x62, 0x0c, 0x7c, 0xa4, 0x56, + 0xcd, 0x5d, 0xb4, 0xa2, 0x4a, 0x09, 0xcd, 0x41, 0xd7, 0x52, 0xd6, 0xfd, 0x4f, 0x9d, 0x61, 0xf4, + 0xe8, 0xf4, 0xbc, 0x57, 0xfb, 0x7e, 0xde, 0xdb, 0x28, 0x68, 0x8a, 0xf0, 0xd8, 0x8e, 0xa9, 0x43, + 0xb0, 0x9c, 0xd8, 0x6f, 0x53, 0xe9, 0x2e, 0x11, 0x3c, 0xdd, 0xa7, 0x39, 0x8c, 0x81, 0x1f, 0xb0, + 0xbd, 0xe6, 0x62, 0x6b, 0x75, 0xc1, 0x7d, 0x90, 0x09, 0xf0, 0xb2, 0xb4, 0xa4, 0x02, 0xde, 0x11, + 0x4e, 0x12, 0x1f, 0x07, 0xc7, 0xfd, 0xcf, 0x75, 0xb4, 0x3e, 0x86, 0x34, 0x8c, 0xd3, 
0xc8, 0x85, + 0x52, 0x8e, 0x69, 0x6a, 0x3e, 0x41, 0x6b, 0x77, 0x21, 0x5c, 0x0d, 0xe7, 0xe0, 0x13, 0x3c, 0xb8, + 0xe2, 0x56, 0x19, 0xea, 0xda, 0xb0, 0x2e, 0x78, 0x50, 0x81, 0xbb, 0xe6, 0x09, 0x85, 0x9c, 0xe3, + 0x69, 0x14, 0x9e, 0x50, 0xc8, 0x5b, 0x9e, 0x17, 0xa8, 0x85, 0x09, 0xcd, 0x52, 0xa9, 0x89, 0xb5, + 0x87, 0x0f, 0xed, 0x32, 0x69, 0x2a, 0x4f, 0x76, 0x99, 0x27, 0xfb, 0x35, 0x8d, 0xd3, 0x51, 0x53, + 0x31, 0x72, 0xcb, 0x72, 0x73, 0x1f, 0xad, 0x04, 0x94, 0xb0, 0x04, 0xd4, 0xd9, 0x3c, 0x15, 0x1f, + 0x8d, 0xb2, 0x3d, 0xdc, 0xb4, 0x8b, 0x6c, 0xd9, 0xb3, 0x6c, 0xd9, 0x87, 0xb3, 0x6c, 0x8d, 0x16, + 0x55, 0x8b, 0x93, 0x1f, 0x3d, 0xc3, 0x5d, 0xbe, 0x32, 0x2b, 0xb9, 0x1f, 0x20, 0xf3, 0x5d, 0x06, + 0x19, 0x84, 0x7f, 0x20, 0xdb, 0x47, 0x0b, 0x90, 0x4a, 0x1e, 0x83, 0x02, 0xd5, 0xd8, 0x6e, 0x0f, + 0x77, 0xec, 0xbb, 0xfe, 0x08, 0xf6, 0x1c, 0xe4, 0xe5, 0x96, 0x67, 0x3d, 0xfa, 0x5f, 0x0d, 0xb4, + 0xf4, 0x06, 0x52, 0x10, 0xb1, 0x78, 0x2f, 0xb1, 0x04, 0xf3, 0x25, 0x6a, 0x31, 0x1d, 0x72, 0x7d, + 0x0f, 0xed, 0xe1, 0xd6, 0x5f, 0xda, 0xeb, 0xba, 0x19, 0x84, 0xc2, 0x65, 0x4e, 0xd0, 0x06, 0x2b, + 0xc6, 0x7a, 0xfc, 0xda, 0x5c, 0x75, 0x4b, 0xff, 0xbc, 0xdb, 0x0e, 0xbb, 0x2d, 0x89, 0xbd, 0xe6, + 0x62, 0x63, 0xb5, 0xe9, 0x56, 0xd3, 0xaa, 0xdc, 0x29, 0x71, 0xb4, 0x77, 0xfa, 0xcb, 0xaa, 0x9d, + 0x5e, 0x58, 0xc6, 0xd9, 0x85, 0x65, 0xfc, 0xbc, 0xb0, 0x8c, 0x93, 0x4b, 0xab, 0x76, 0x76, 0x69, + 0xd5, 0xbe, 0x5d, 0x5a, 0xb5, 0x8f, 0x4f, 0xa3, 0x58, 0x4e, 0x32, 0xdf, 0x0e, 0x28, 0x71, 0xca, + 0xc7, 0x02, 0x72, 0xe2, 0x4c, 0x6f, 0xbe, 0x3d, 0xf2, 0x13, 0x03, 0xe1, 0xb7, 0xf4, 0xb5, 0x3d, + 0xfb, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xd7, 0xbd, 0xa4, 0xa3, 0xa1, 0x04, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.MaxMovePerOp.Size() + i -= size + if _, err := m.MaxMovePerOp.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintPoolrebalancer(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.MaxOpsPerBlock != 0 { + i = encodeVarintPoolrebalancer(dAtA, i, uint64(m.MaxOpsPerBlock)) + i-- + dAtA[i] = 0x20 + } + if m.RebalanceThresholdBp != 0 { + i = encodeVarintPoolrebalancer(dAtA, i, uint64(m.RebalanceThresholdBp)) + i-- + dAtA[i] = 0x18 + } + if m.MaxTargetValidators != 0 { + i = encodeVarintPoolrebalancer(dAtA, i, uint64(m.MaxTargetValidators)) + i-- + dAtA[i] = 0x10 + } + if len(m.PoolDelegatorAddress) > 0 { + i -= len(m.PoolDelegatorAddress) + copy(dAtA[i:], m.PoolDelegatorAddress) + i = encodeVarintPoolrebalancer(dAtA, i, uint64(len(m.PoolDelegatorAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PendingRedelegation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PendingRedelegation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PendingRedelegation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.CompletionTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CompletionTime):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintPoolrebalancer(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x2a + { + size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPoolrebalancer(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if 
len(m.DstValidatorAddress) > 0 { + i -= len(m.DstValidatorAddress) + copy(dAtA[i:], m.DstValidatorAddress) + i = encodeVarintPoolrebalancer(dAtA, i, uint64(len(m.DstValidatorAddress))) + i-- + dAtA[i] = 0x1a + } + if len(m.SrcValidatorAddress) > 0 { + i -= len(m.SrcValidatorAddress) + copy(dAtA[i:], m.SrcValidatorAddress) + i = encodeVarintPoolrebalancer(dAtA, i, uint64(len(m.SrcValidatorAddress))) + i-- + dAtA[i] = 0x12 + } + if len(m.DelegatorAddress) > 0 { + i -= len(m.DelegatorAddress) + copy(dAtA[i:], m.DelegatorAddress) + i = encodeVarintPoolrebalancer(dAtA, i, uint64(len(m.DelegatorAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueuedRedelegation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueuedRedelegation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueuedRedelegation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPoolrebalancer(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PendingRedelegations) > 0 { + for 
iNdEx := len(m.PendingRedelegations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PendingRedelegations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPoolrebalancer(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPoolrebalancer(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintPoolrebalancer(dAtA []byte, offset int, v uint64) int { + offset -= sovPoolrebalancer(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PoolDelegatorAddress) + if l > 0 { + n += 1 + l + sovPoolrebalancer(uint64(l)) + } + if m.MaxTargetValidators != 0 { + n += 1 + sovPoolrebalancer(uint64(m.MaxTargetValidators)) + } + if m.RebalanceThresholdBp != 0 { + n += 1 + sovPoolrebalancer(uint64(m.RebalanceThresholdBp)) + } + if m.MaxOpsPerBlock != 0 { + n += 1 + sovPoolrebalancer(uint64(m.MaxOpsPerBlock)) + } + l = m.MaxMovePerOp.Size() + n += 1 + l + sovPoolrebalancer(uint64(l)) + return n +} + +func (m *PendingRedelegation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DelegatorAddress) + if l > 0 { + n += 1 + l + sovPoolrebalancer(uint64(l)) + } + l = len(m.SrcValidatorAddress) + if l > 0 { + n += 1 + l + sovPoolrebalancer(uint64(l)) + } + l = len(m.DstValidatorAddress) + if l > 0 { + n += 1 + l + sovPoolrebalancer(uint64(l)) + } + l = m.Amount.Size() + n += 1 + l + sovPoolrebalancer(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CompletionTime) + n += 1 + l + sovPoolrebalancer(uint64(l)) + return n +} + +func (m *QueuedRedelegation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovPoolrebalancer(uint64(l)) + } + } + return n +} + +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovPoolrebalancer(uint64(l)) + if len(m.PendingRedelegations) > 0 { + for _, e := range m.PendingRedelegations { + l = e.Size() + n += 1 + l + sovPoolrebalancer(uint64(l)) + } + } + return n +} + +func sovPoolrebalancer(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPoolrebalancer(x uint64) (n int) { + return sovPoolrebalancer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolDelegatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.PoolDelegatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTargetValidators", wireType) + } + m.MaxTargetValidators = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTargetValidators |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RebalanceThresholdBp", wireType) + } + m.RebalanceThresholdBp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RebalanceThresholdBp |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxOpsPerBlock", wireType) + } + m.MaxOpsPerBlock = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxOpsPerBlock |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxMovePerOp", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.MaxMovePerOp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPoolrebalancer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPoolrebalancer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PendingRedelegation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PendingRedelegation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PendingRedelegation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DelegatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SrcValidatorAddress", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SrcValidatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DstValidatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DstValidatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.CompletionTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPoolrebalancer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPoolrebalancer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuedRedelegation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuedRedelegation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueuedRedelegation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, PendingRedelegation{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPoolrebalancer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPoolrebalancer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingRedelegations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPoolrebalancer + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPoolrebalancer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PendingRedelegations = append(m.PendingRedelegations, PendingRedelegation{}) + if err := m.PendingRedelegations[len(m.PendingRedelegations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPoolrebalancer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPoolrebalancer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPoolrebalancer(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPoolrebalancer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPoolrebalancer + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPoolrebalancer + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPoolrebalancer + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPoolrebalancer = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPoolrebalancer = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPoolrebalancer = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/poolrebalancer/types/query.pb.go b/x/poolrebalancer/types/query.pb.go new file mode 100644 index 00000000..1444a2fb --- /dev/null +++ b/x/poolrebalancer/types/query.pb.go @@ -0,0 +1,982 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: cosmos/poolrebalancer/v1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is the request type for the Query/Params RPC method. 
+type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_882dee8c3ee6b12d, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is the response type for the Query/Params RPC method. 
+type QueryParamsResponse struct { + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_882dee8c3ee6b12d, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +// QueryPendingRedelegationsRequest is the request type for the Query/PendingRedelegations RPC method. +type QueryPendingRedelegationsRequest struct { + // pagination defines an optional pagination for the request. 
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryPendingRedelegationsRequest) Reset() { *m = QueryPendingRedelegationsRequest{} } +func (m *QueryPendingRedelegationsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryPendingRedelegationsRequest) ProtoMessage() {} +func (*QueryPendingRedelegationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_882dee8c3ee6b12d, []int{2} +} +func (m *QueryPendingRedelegationsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPendingRedelegationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPendingRedelegationsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPendingRedelegationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPendingRedelegationsRequest.Merge(m, src) +} +func (m *QueryPendingRedelegationsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPendingRedelegationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPendingRedelegationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPendingRedelegationsRequest proto.InternalMessageInfo + +// QueryPendingRedelegationsResponse is the response type for the Query/PendingRedelegations RPC method. 
+type QueryPendingRedelegationsResponse struct { + Redelegations []PendingRedelegation `protobuf:"bytes,1,rep,name=redelegations,proto3" json:"redelegations"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryPendingRedelegationsResponse) Reset() { *m = QueryPendingRedelegationsResponse{} } +func (m *QueryPendingRedelegationsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryPendingRedelegationsResponse) ProtoMessage() {} +func (*QueryPendingRedelegationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_882dee8c3ee6b12d, []int{3} +} +func (m *QueryPendingRedelegationsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPendingRedelegationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPendingRedelegationsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPendingRedelegationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPendingRedelegationsResponse.Merge(m, src) +} +func (m *QueryPendingRedelegationsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPendingRedelegationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPendingRedelegationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPendingRedelegationsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "cosmos.poolrebalancer.v1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "cosmos.poolrebalancer.v1.QueryParamsResponse") + proto.RegisterType((*QueryPendingRedelegationsRequest)(nil), "cosmos.poolrebalancer.v1.QueryPendingRedelegationsRequest") + proto.RegisterType((*QueryPendingRedelegationsResponse)(nil), 
"cosmos.poolrebalancer.v1.QueryPendingRedelegationsResponse") +} + +func init() { + proto.RegisterFile("cosmos/poolrebalancer/v1/query.proto", fileDescriptor_882dee8c3ee6b12d) +} + +var fileDescriptor_882dee8c3ee6b12d = []byte{ + // 457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcf, 0x8a, 0xd4, 0x30, + 0x18, 0x6f, 0x46, 0x1c, 0x30, 0x8b, 0x07, 0xe3, 0x1c, 0x86, 0x41, 0x6a, 0x2d, 0xa2, 0xc3, 0xb2, + 0x93, 0xd0, 0xf1, 0x20, 0xe8, 0x6d, 0x05, 0x05, 0x4f, 0xeb, 0x1c, 0xf7, 0xa0, 0xa4, 0xb3, 0x1f, + 0xb1, 0xd2, 0x26, 0xdd, 0xa6, 0x53, 0xdc, 0xab, 0x2f, 0xa0, 0xe0, 0x4b, 0x78, 0xf4, 0x31, 0x7a, + 0xf0, 0xb0, 0xe0, 0xc5, 0x93, 0xe8, 0xcc, 0x82, 0xaf, 0x21, 0x93, 0x64, 0xb1, 0xdd, 0xb5, 0x33, + 0xe2, 0xa5, 0x84, 0xe4, 0xfb, 0xfd, 0x4d, 0x8a, 0xef, 0xce, 0x95, 0xce, 0x94, 0x66, 0xb9, 0x52, + 0x69, 0x01, 0x31, 0x4f, 0xb9, 0x9c, 0x43, 0xc1, 0xaa, 0x88, 0x1d, 0x2f, 0xa0, 0x38, 0xa1, 0x79, + 0xa1, 0x4a, 0x45, 0x86, 0x76, 0x8a, 0xb6, 0xa7, 0x68, 0x15, 0x8d, 0x6e, 0xf0, 0x2c, 0x91, 0x8a, + 0x99, 0xaf, 0x1d, 0x1e, 0xed, 0x3a, 0xca, 0x98, 0x6b, 0xb0, 0x2c, 0xac, 0x8a, 0x62, 0x28, 0x79, + 0xc4, 0x72, 0x2e, 0x12, 0xc9, 0xcb, 0x44, 0x49, 0x37, 0x3b, 0xe9, 0x94, 0xbf, 0x20, 0x65, 0xc7, + 0x07, 0x42, 0x09, 0x65, 0x96, 0x6c, 0xbd, 0x72, 0xbb, 0xb7, 0x84, 0x52, 0x22, 0x05, 0xc6, 0xf3, + 0x84, 0x71, 0x29, 0x55, 0x69, 0x14, 0xb4, 0x3d, 0x0d, 0x07, 0x98, 0xbc, 0x58, 0x9b, 0x38, 0xe0, + 0x05, 0xcf, 0xf4, 0x0c, 0x8e, 0x17, 0xa0, 0xcb, 0xf0, 0x10, 0xdf, 0x6c, 0xed, 0xea, 0x5c, 0x49, + 0x0d, 0xe4, 0x09, 0xee, 0xe7, 0x66, 0x67, 0x88, 0x02, 0x34, 0xde, 0x99, 0x06, 0xb4, 0x2b, 0x39, + 0xb5, 0xc8, 0xfd, 0x6b, 0xf5, 0xf7, 0xdb, 0xde, 0xa7, 0x5f, 0x9f, 0x77, 0xd1, 0xcc, 0x41, 0xc3, + 0x37, 0x38, 0xb0, 0xdc, 0x20, 0x8f, 0x12, 0x29, 0x66, 0x70, 0x04, 0x29, 0x08, 0x6b, 0xca, 0xe9, + 0x93, 0xa7, 0x18, 0xff, 0x29, 0xc3, 0x89, 0xdd, 0x3b, 0x17, 0x5b, 0x37, 0x47, 0x6d, 0xff, 0xae, + 0x39, 0x7a, 0xc0, 0x05, 0x38, 0xec, 0xac, 0x81, 0x0c, 
0xbf, 0x20, 0x7c, 0x67, 0x83, 0x98, 0x8b, + 0xf5, 0x12, 0x5f, 0x2f, 0x9a, 0x07, 0x43, 0x14, 0x5c, 0x19, 0xef, 0x4c, 0x27, 0x1b, 0xd2, 0x5d, + 0xa6, 0x6b, 0x46, 0x6d, 0xd3, 0x91, 0x67, 0xad, 0x34, 0x3d, 0x93, 0xe6, 0xfe, 0xd6, 0x34, 0xd6, + 0x5c, 0x33, 0xce, 0xf4, 0xac, 0x87, 0xaf, 0x9a, 0x38, 0xe4, 0x3d, 0xc2, 0x7d, 0x5b, 0x31, 0xd9, + 0xeb, 0xb6, 0x79, 0xf9, 0x66, 0x47, 0x93, 0x7f, 0x9c, 0xb6, 0xea, 0xe1, 0xf8, 0xdd, 0xd7, 0xb3, + 0x8f, 0xbd, 0x90, 0x04, 0xac, 0xfb, 0x29, 0x5a, 0x1b, 0x35, 0xc2, 0x83, 0xbf, 0xb5, 0x4c, 0x1e, + 0x6d, 0x53, 0xec, 0x7e, 0x07, 0xa3, 0xc7, 0xff, 0x85, 0x75, 0xde, 0x1f, 0x1a, 0xef, 0x11, 0x61, + 0x1b, 0xbc, 0x5b, 0xfc, 0xab, 0xd6, 0x7d, 0xed, 0x3f, 0xaf, 0x7f, 0xfa, 0x5e, 0xbd, 0xf4, 0xd1, + 0xe9, 0xd2, 0x47, 0x3f, 0x96, 0x3e, 0xfa, 0xb0, 0xf2, 0xbd, 0xd3, 0x95, 0xef, 0x7d, 0x5b, 0xf9, + 0xde, 0xe1, 0x9e, 0x48, 0xca, 0xd7, 0x8b, 0x98, 0xce, 0x55, 0x76, 0x4e, 0x0c, 0x55, 0xc6, 0xde, + 0x5e, 0xa4, 0x2f, 0x4f, 0x72, 0xd0, 0x71, 0xdf, 0xfc, 0x66, 0x0f, 0x7e, 0x07, 0x00, 0x00, 0xff, + 0xff, 0x8a, 0x0c, 0x10, 0x8d, 0x4a, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Params returns the poolrebalancer module params. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) + // PendingRedelegations returns tracked in-flight redelegations. 
+ PendingRedelegations(ctx context.Context, in *QueryPendingRedelegationsRequest, opts ...grpc.CallOption) (*QueryPendingRedelegationsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/cosmos.poolrebalancer.v1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) PendingRedelegations(ctx context.Context, in *QueryPendingRedelegationsRequest, opts ...grpc.CallOption) (*QueryPendingRedelegationsResponse, error) { + out := new(QueryPendingRedelegationsResponse) + err := c.cc.Invoke(ctx, "/cosmos.poolrebalancer.v1.Query/PendingRedelegations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Params returns the poolrebalancer module params. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) + // PendingRedelegations returns tracked in-flight redelegations. + PendingRedelegations(context.Context, *QueryPendingRedelegationsRequest) (*QueryPendingRedelegationsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} +func (*UnimplementedQueryServer) PendingRedelegations(ctx context.Context, req *QueryPendingRedelegationsRequest) (*QueryPendingRedelegationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PendingRedelegations not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cosmos.poolrebalancer.v1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_PendingRedelegations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPendingRedelegationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).PendingRedelegations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cosmos.poolrebalancer.v1.Query/PendingRedelegations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).PendingRedelegations(ctx, req.(*QueryPendingRedelegationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = 
grpc.ServiceDesc{ + ServiceName: "cosmos.poolrebalancer.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + { + MethodName: "PendingRedelegations", + Handler: _Query_PendingRedelegations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cosmos/poolrebalancer/v1/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryPendingRedelegationsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPendingRedelegationsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPendingRedelegationsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPendingRedelegationsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPendingRedelegationsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPendingRedelegationsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Redelegations) > 0 { + for iNdEx := len(m.Redelegations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Redelegations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + 
sovQuery(uint64(l)) + return n +} + +func (m *QueryPendingRedelegationsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryPendingRedelegationsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Redelegations) > 0 { + for _, e := range m.Redelegations { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPendingRedelegationsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: QueryPendingRedelegationsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPendingRedelegationsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPendingRedelegationsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPendingRedelegationsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPendingRedelegationsResponse: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Redelegations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Redelegations = append(m.Redelegations, PendingRedelegation{}) + if err := m.Redelegations[len(m.Redelegations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 
+ depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/poolrebalancer/types/query.pb.gw.go b/x/poolrebalancer/types/query.pb.gw.go new file mode 100644 index 00000000..4df6bd0b --- /dev/null +++ b/x/poolrebalancer/types/query.pb.gw.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: cosmos/poolrebalancer/v1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_PendingRedelegations_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_PendingRedelegations_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPendingRedelegationsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PendingRedelegations_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.PendingRedelegations(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_PendingRedelegations_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPendingRedelegationsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PendingRedelegations_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.PendingRedelegations(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_PendingRedelegations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_PendingRedelegations_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PendingRedelegations_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_PendingRedelegations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_PendingRedelegations_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PendingRedelegations_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"cosmos", "poolrebalancer", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_PendingRedelegations_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"cosmos", "poolrebalancer", "v1", "pending_redelegations"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage + + forward_Query_PendingRedelegations_0 = runtime.ForwardResponseMessage +) diff --git a/x/poolrebalancer/types/tx.pb.go b/x/poolrebalancer/types/tx.pb.go new file mode 100644 index 00000000..6681b85f --- /dev/null +++ b/x/poolrebalancer/types/tx.pb.go @@ -0,0 +1,596 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: cosmos/poolrebalancer/v1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgUpdateParams defines a Msg for updating the x/poolrebalancer module parameters. +type MsgUpdateParams struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // params defines the x/poolrebalancer parameters to update. + // NOTE: All parameters must be supplied. 
+ Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_33319066f5e58215, []int{0} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +// MsgUpdateParamsResponse defines the response structure for executing a MsgUpdateParams message. 
+type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_33319066f5e58215, []int{1} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgUpdateParams)(nil), "cosmos.poolrebalancer.v1.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), "cosmos.poolrebalancer.v1.MsgUpdateParamsResponse") +} + +func init() { proto.RegisterFile("cosmos/poolrebalancer/v1/tx.proto", fileDescriptor_33319066f5e58215) } + +var fileDescriptor_33319066f5e58215 = []byte{ + // 348 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xce, 0x2f, 0xce, + 0xcd, 0x2f, 0xd6, 0x2f, 0xc8, 0xcf, 0xcf, 0x29, 0x4a, 0x4d, 0x4a, 0xcc, 0x49, 0xcc, 0x4b, 0x4e, + 0x2d, 0xd2, 0x2f, 0x33, 0xd4, 0x2f, 0xa9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x80, + 0x28, 0xd1, 0x43, 0x55, 0xa2, 0x57, 0x66, 0x28, 0x25, 0x98, 0x98, 0x9b, 0x99, 0x97, 0xaf, 
0x0f, + 0x26, 0x21, 0x8a, 0xa5, 0x74, 0x71, 0x9a, 0x87, 0xa6, 0x1d, 0xa2, 0x5c, 0x1c, 0xaa, 0x3c, 0xb7, + 0x38, 0x1d, 0xa4, 0x26, 0xb7, 0x38, 0x1d, 0x2a, 0x21, 0x09, 0x91, 0x88, 0x07, 0xf3, 0xf4, 0x61, + 0x2e, 0x00, 0x4b, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x43, 0xc4, 0x41, 0x2c, 0x88, 0xa8, 0xd2, 0x69, + 0x46, 0x2e, 0x7e, 0xdf, 0xe2, 0xf4, 0xd0, 0x82, 0x94, 0xc4, 0x92, 0xd4, 0x80, 0xc4, 0xa2, 0xc4, + 0xdc, 0x62, 0x21, 0x33, 0x2e, 0xce, 0xc4, 0xd2, 0x92, 0x8c, 0xfc, 0xa2, 0xcc, 0x92, 0x4a, 0x09, + 0x46, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0x89, 0x4b, 0x5b, 0x74, 0x45, 0xa0, 0xc6, 0x39, 0xa6, 0xa4, + 0x14, 0xa5, 0x16, 0x17, 0x07, 0x97, 0x14, 0x65, 0xe6, 0xa5, 0x07, 0x21, 0x94, 0x0a, 0x39, 0x73, + 0xb1, 0x15, 0x80, 0x4d, 0x90, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x36, 0x52, 0xd0, 0xc3, 0x15, 0x04, + 0x7a, 0x10, 0x9b, 0x9c, 0x38, 0x4f, 0xdc, 0x93, 0x67, 0x58, 0xf1, 0x7c, 0x83, 0x16, 0x63, 0x10, + 0x54, 0xab, 0x95, 0x75, 0xd3, 0xf3, 0x0d, 0x5a, 0x08, 0x43, 0xbb, 0x9e, 0x6f, 0xd0, 0xd2, 0xc0, + 0x19, 0x38, 0x68, 0x2e, 0x57, 0x92, 0xe4, 0x12, 0x47, 0x13, 0x0a, 0x4a, 0x2d, 0x2e, 0xc8, 0xcf, + 0x2b, 0x4e, 0x35, 0xaa, 0xe2, 0x62, 0xf6, 0x2d, 0x4e, 0x17, 0xca, 0xe1, 0xe2, 0x41, 0xf1, 0xab, + 0x26, 0x6e, 0x37, 0xa2, 0x99, 0x24, 0x65, 0x48, 0xb4, 0x52, 0x98, 0xa5, 0x52, 0xac, 0x0d, 0x20, + 0xbf, 0x39, 0xb9, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, + 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x4e, 0x7a, + 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0x2e, 0x34, 0xb6, 0xf4, 0x53, 0xcb, 0x72, 0xf5, + 0x2b, 0xd0, 0xfd, 0x5a, 0x52, 0x59, 0x90, 0x5a, 0x9c, 0xc4, 0x06, 0x8e, 0x33, 0x63, 0x40, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xa5, 0xe7, 0x91, 0x58, 0x7e, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // UpdateParams is a governance operation for updating the x/poolrebalancer module parameters. + // The authority is the Cosmos SDK x/gov module account. + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/cosmos.poolrebalancer.v1.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // UpdateParams is a governance operation for updating the x/poolrebalancer module parameters. + // The authority is the Cosmos SDK x/gov module account. + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cosmos.poolrebalancer.v1.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cosmos.poolrebalancer.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cosmos/poolrebalancer/v1/tx.proto", +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if 
len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Params.Size() + n += 1 + l + sovTx(uint64(l)) + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + 
return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +)