From b3e5f012c6d1eeb214faf6835e4b5bed6faec4cb Mon Sep 17 00:00:00 2001 From: Akarsh Ellore Sreenath Date: Mon, 16 Mar 2026 18:02:15 +0530 Subject: [PATCH] Add flex CIDR allocator controller --- .../oci-cloud-controller-manager.yaml | 3 + pkg/cloudprovider/providers/oci/ccm.go | 14 + pkg/cloudprovider/providers/oci/ccm_test.go | 18 + .../providers/oci/flex_cidr_controller.go | 253 ++++++++++ .../oci/flex_cidr_controller_test.go | 66 +++ .../providers/oci/instances_test.go | 8 + pkg/csi/driver/bv_controller_test.go | 16 + pkg/flexcidr/flexcidr.go | 447 ++++++++++++++++++ pkg/flexcidr/flexcidr_test.go | 445 +++++++++++++++++ pkg/oci/client/client.go | 6 +- pkg/oci/client/client_test.go | 8 + pkg/oci/client/errors.go | 55 +++ pkg/oci/client/networking.go | 104 +++- pkg/oci/client/networking_test.go | 125 +++++ pkg/volume/provisioner/block/block_test.go | 17 + pkg/volume/provisioner/fss/fss_test.go | 8 + 16 files changed, 1581 insertions(+), 12 deletions(-) create mode 100644 pkg/cloudprovider/providers/oci/flex_cidr_controller.go create mode 100644 pkg/cloudprovider/providers/oci/flex_cidr_controller_test.go create mode 100644 pkg/flexcidr/flexcidr.go create mode 100644 pkg/flexcidr/flexcidr_test.go diff --git a/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml b/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml index 0ea462cf8a..0a531e9e5c 100644 --- a/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml +++ b/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml @@ -50,6 +50,9 @@ spec: - --leader-elect-resource-lock=leases - --concurrent-service-syncs=3 - --v=2 + env: + - name: ENABLE_FLEX_CIDR_CONTROLLER + value: "false" volumeMounts: - name: cfg mountPath: /etc/oci diff --git a/pkg/cloudprovider/providers/oci/ccm.go b/pkg/cloudprovider/providers/oci/ccm.go index acb2a05d42..f273ee3405 100644 --- a/pkg/cloudprovider/providers/oci/ccm.go +++ b/pkg/cloudprovider/providers/oci/ccm.go @@ -51,6 +51,7 @@ 
const ( providerName = "oci" providerPrefix = providerName + "://" + enableFlexCIDRController = "ENABLE_FLEX_CIDR_CONTROLLER" disableInstanceTaggingController = "DISABLE_INSTANCE_TAGGING_CONTROLLER" openshiftNodeLabelId = "OPENSHIFT_NODE_LABEL_ID" // Default OpenShift node OS label key/value @@ -191,6 +192,13 @@ func (cp *CloudProvider) Initialize(clientBuilder cloudprovider.ControllerClient cp.logger, cp.instanceCache, cp.client) + flexCIDRController := NewFlexCIDRController( + factory.Core().V1().Nodes(), + factory.Core().V1().Services(), + cp.kubeclient, + cp, + cp.logger, + cp.client) nodeInformer := factory.Core().V1().Nodes() go nodeInformer.Informer().Run(wait.NeverStop) @@ -202,6 +210,12 @@ func (cp *CloudProvider) Initialize(clientBuilder cloudprovider.ControllerClient go serviceAccountInformer.Informer().Run(wait.NeverStop) go nodeInfoController.Run(wait.NeverStop) + if GetIsFeatureEnabledFromEnv(cp.logger, enableFlexCIDRController, false) { + cp.logger.Info("Flex CIDR controller enabled") + go flexCIDRController.Run(wait.NeverStop) + } else { + cp.logger.Info("Flex CIDR controller disabled because ENABLE_FLEX_CIDR_CONTROLLER is unset or set to false") + } // If the cluster is type OpenShift then the Tagging Controller // should be enabled. 
diff --git a/pkg/cloudprovider/providers/oci/ccm_test.go b/pkg/cloudprovider/providers/oci/ccm_test.go index a7cc2308ed..2c306b52bc 100644 --- a/pkg/cloudprovider/providers/oci/ccm_test.go +++ b/pkg/cloudprovider/providers/oci/ccm_test.go @@ -203,3 +203,21 @@ func TestParseOpenShiftLabelSelectorInvalid(t *testing.T) { t.Fatalf("expected error for missing key and value") } } + +func TestEnableFlexCIDRControllerEnv(t *testing.T) { + logger := zap.NewNop().Sugar() + + t.Run("defaults to disabled", func(t *testing.T) { + t.Setenv(enableFlexCIDRController, "") + if got := GetIsFeatureEnabledFromEnv(logger, enableFlexCIDRController, false); got { + t.Fatalf("GetIsFeatureEnabledFromEnv(%q) = %v, want false", enableFlexCIDRController, got) + } + }) + + t.Run("can be enabled explicitly", func(t *testing.T) { + t.Setenv(enableFlexCIDRController, "true") + if got := GetIsFeatureEnabledFromEnv(logger, enableFlexCIDRController, false); !got { + t.Fatalf("GetIsFeatureEnabledFromEnv(%q) = %v, want true", enableFlexCIDRController, got) + } + }) +} diff --git a/pkg/cloudprovider/providers/oci/flex_cidr_controller.go b/pkg/cloudprovider/providers/oci/flex_cidr_controller.go new file mode 100644 index 0000000000..2186eac1f5 --- /dev/null +++ b/pkg/cloudprovider/providers/oci/flex_cidr_controller.go @@ -0,0 +1,253 @@ +// Copyright 2026 Oracle and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package oci + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/oracle/oci-cloud-controller-manager/pkg/flexcidr" + "github.com/oracle/oci-cloud-controller-manager/pkg/oci/client" + "github.com/oracle/oci-go-sdk/v65/core" + "github.com/pkg/errors" + "go.uber.org/zap" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +const flexCIDRRetryDelay = time.Minute + +type FlexCIDRController struct { + nodeInformer coreinformers.NodeInformer + serviceInformer coreinformers.ServiceInformer + kubeClient clientset.Interface + cloud *CloudProvider + queue workqueue.RateLimitingInterface + logger *zap.SugaredLogger + ociClient client.Interface + expectedPodCIDRsMu sync.RWMutex + expectedPodCIDRsByNode map[string][]string +} + +func NewFlexCIDRController( + nodeInformer coreinformers.NodeInformer, + serviceInformer coreinformers.ServiceInformer, + kubeClient clientset.Interface, + cloud *CloudProvider, + logger *zap.SugaredLogger, + ociClient client.Interface) *FlexCIDRController { + + controller := &FlexCIDRController{ + nodeInformer: nodeInformer, + serviceInformer: serviceInformer, + kubeClient: kubeClient, + cloud: cloud, + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + logger: logger, + ociClient: ociClient, + expectedPodCIDRsByNode: make(map[string][]string), + } + + controller.nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + node := obj.(*v1.Node) + controller.queue.Add(node.Name) + }, + UpdateFunc: func(_, newObj interface{}) { + node := newObj.(*v1.Node) + controller.queue.Add(node.Name) + }, + DeleteFunc: func(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if 
err != nil { + controller.logger.With(zap.Error(err)).Debug("failed to determine deleted node cache key") + return + } + controller.deleteExpectedPodCIDRs(key) + }, + }) + + return controller +} + +func (fcc *FlexCIDRController) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer fcc.queue.ShutDown() + + fcc.logger.Info("Starting flex CIDR controller") + + if !cache.WaitForCacheSync(stopCh, fcc.nodeInformer.Informer().HasSynced, fcc.serviceInformer.Informer().HasSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for flex CIDR controller caches to sync")) + return + } + + wait.Until(fcc.runWorker, time.Second, stopCh) +} + +func (fcc *FlexCIDRController) runWorker() { + for fcc.processNextItem() { + } +} + +func (fcc *FlexCIDRController) processNextItem() bool { + key, quit := fcc.queue.Get() + if quit { + return false + } + defer fcc.queue.Done(key) + + if err := fcc.processItem(key.(string)); err != nil { + fcc.logger.Errorf("Error processing flex CIDR for node %s (will retry): %v", key, err) + fcc.queue.AddRateLimited(key) + } else { + fcc.queue.Forget(key) + } + + return true +} + +func (fcc *FlexCIDRController) processItem(key string) error { + logger := fcc.logger.With("node", key) + + node, err := fcc.nodeInformer.Lister().Get(key) + if err != nil { + return err + } + + if len(node.Spec.PodCIDRs) > 0 && len(node.Spec.ProviderID) == 0 { + logger.Debug("node already has podCIDRs but providerID is empty, skipping") + return nil + } + + if expectedPodCIDRs, ok := fcc.getExpectedPodCIDRs(node.Name); ok && flexcidr.StringSlicesEqualIgnoreOrder(node.Spec.PodCIDRs, expectedPodCIDRs) { + logger.Debugf("node already has cached expected podCIDRs %v", expectedPodCIDRs) + return nil + } + + instance, instanceID, err := fcc.getInstanceByNode(node, logger) + if err != nil { + return err + } + if instance == nil { + return nil + } + + if instance.LifecycleState != core.InstanceLifecycleStateRunning { + logger.Infof("instance %s not running 
yet, requeueing", instanceID) + fcc.queue.AddAfter(key, flexCIDRRetryDelay) + return nil + } + + config, hasConfig := flexcidr.ParsePrimaryVnicConfig(instance) + if !hasConfig { + logger.Debug("instance metadata does not include flex CIDR configuration, skipping") + return nil + } + + clusterIPFamily, err := flexcidr.GetClusterIpFamily(context.Background(), fcc.serviceInformer.Lister()) + if err != nil { + logger.With(zap.Error(err)).Info("cluster IP family not ready yet, requeueing") + fcc.queue.AddAfter(key, flexCIDRRetryDelay) + return nil + } + + primaryVNIC, err := fcc.ociClient.Compute().GetPrimaryVNICForInstance(context.Background(), *instance.CompartmentId, instanceID) + if err != nil { + return errors.Wrap(err, "GetPrimaryVNICForInstance") + } + + flexCIDRManager := &flexcidr.FlexCIDR{ + Logger: logger, + PrimaryVnicConfig: config, + ClusterIpFamily: clusterIPFamily, + OciCoreClient: fcc.ociClient.Networking(nil), + } + + flexCIDRs, err := flexCIDRManager.GetOrCreateFlexCidrList(*primaryVNIC.Id) + if err != nil { + return err + } + if !flexCIDRManager.ValidateFlexCidrList(flexCIDRs) { + return fmt.Errorf("computed flex CIDRs %v are invalid", flexCIDRs) + } + fcc.setExpectedPodCIDRs(node.Name, flexCIDRs) + if flexcidr.StringSlicesEqualIgnoreOrder(node.Spec.PodCIDRs, flexCIDRs) { + logger.Debugf("node already has expected podCIDRs %v", flexCIDRs) + return nil + } + + return flexcidr.PatchNodePodCIDRs(context.Background(), fcc.kubeClient, node.Name, flexCIDRs, logger) +} + +func (fcc *FlexCIDRController) getExpectedPodCIDRs(nodeName string) ([]string, bool) { + fcc.expectedPodCIDRsMu.RLock() + defer fcc.expectedPodCIDRsMu.RUnlock() + + podCIDRs, ok := fcc.expectedPodCIDRsByNode[nodeName] + if !ok { + return nil, false + } + return append([]string(nil), podCIDRs...), true +} + +func (fcc *FlexCIDRController) setExpectedPodCIDRs(nodeName string, podCIDRs []string) { + fcc.expectedPodCIDRsMu.Lock() + defer fcc.expectedPodCIDRsMu.Unlock() + + 
fcc.expectedPodCIDRsByNode[nodeName] = append([]string(nil), podCIDRs...) +} + +func (fcc *FlexCIDRController) deleteExpectedPodCIDRs(nodeName string) { + fcc.expectedPodCIDRsMu.Lock() + defer fcc.expectedPodCIDRsMu.Unlock() + + delete(fcc.expectedPodCIDRsByNode, nodeName) +} + +func (fcc *FlexCIDRController) getInstanceByNode(node *v1.Node, logger *zap.SugaredLogger) (*core.Instance, string, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + providerID := node.Spec.ProviderID + var err error + if providerID == "" { + providerID, err = fcc.cloud.InstanceID(ctx, types.NodeName(node.Name)) + if err != nil { + return nil, "", err + } + } + + instanceID, err := MapProviderIDToResourceID(providerID) + if err != nil { + logger.With(zap.Error(err)).Error("failed to map providerID to instanceID") + return nil, "", err + } + + instance, err := fcc.ociClient.Compute().GetInstance(ctx, instanceID) + if err != nil { + logger.With(zap.Error(err)).Error("failed to fetch instance") + return nil, "", err + } + + return instance, instanceID, nil +} diff --git a/pkg/cloudprovider/providers/oci/flex_cidr_controller_test.go b/pkg/cloudprovider/providers/oci/flex_cidr_controller_test.go new file mode 100644 index 0000000000..2be706bc5e --- /dev/null +++ b/pkg/cloudprovider/providers/oci/flex_cidr_controller_test.go @@ -0,0 +1,66 @@ +// Copyright 2026 Oracle and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "testing" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" +) + +func TestProcessItemSkipsOCILookupsWhenNodeAlreadyHasCachedExpectedPodCIDRs(t *testing.T) { + kubeClient := fake.NewSimpleClientset() + factory := informers.NewSharedInformerFactory(kubeClient, 0) + nodeInformer := factory.Core().V1().Nodes() + + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "worker-node-3"}, + Spec: corev1.NodeSpec{ + ProviderID: "oci://instance", + PodCIDRs: []string{"10.0.0.0/24", "2001:db8::/80"}, + }, + } + if err := nodeInformer.Informer().GetStore().Add(node); err != nil { + t.Fatalf("adding node to informer store: %v", err) + } + + controller := &FlexCIDRController{ + nodeInformer: nodeInformer, + logger: zap.NewNop().Sugar(), + expectedPodCIDRsByNode: make(map[string][]string), + } + controller.setExpectedPodCIDRs(node.Name, []string{"10.0.0.0/24", "2001:db8::/80"}) + + if err := controller.processItem(node.Name); err != nil { + t.Fatalf("processItem() error = %v, want nil", err) + } +} + +func TestDeleteExpectedPodCIDRsRemovesCachedValue(t *testing.T) { + controller := &FlexCIDRController{ + expectedPodCIDRsByNode: make(map[string][]string), + } + + controller.setExpectedPodCIDRs("worker-node-3", []string{"10.0.0.0/24"}) + controller.deleteExpectedPodCIDRs("worker-node-3") + + if _, ok := controller.getExpectedPodCIDRs("worker-node-3"); ok { + t.Fatal("expected cached podCIDRs to be removed") + } +} diff --git a/pkg/cloudprovider/providers/oci/instances_test.go b/pkg/cloudprovider/providers/oci/instances_test.go index 58f3015404..9cdef2ef5e 100644 --- a/pkg/cloudprovider/providers/oci/instances_test.go +++ b/pkg/cloudprovider/providers/oci/instances_test.go @@ -1037,6 +1037,10 @@ func (c 
*MockVirtualNetworkClient) CreatePrivateIp(ctx context.Context, vnicID s return nil, nil } +func (c *MockVirtualNetworkClient) CreatePrivateIpWithRequest(ctx context.Context, request core.CreatePrivateIpRequest) (core.PrivateIp, error) { + return core.PrivateIp{}, nil +} + func (c *MockVirtualNetworkClient) GetIpv6(ctx context.Context, id string) (*core.Ipv6, error) { return &core.Ipv6{}, nil } @@ -1057,6 +1061,10 @@ func (c *MockVirtualNetworkClient) CreateIpv6(ctx context.Context, vnicID string return &core.Ipv6{}, nil } +func (c *MockVirtualNetworkClient) CreateIpv6WithRequest(ctx context.Context, request core.CreateIpv6Request) (core.Ipv6, error) { + return core.Ipv6{}, nil +} + func (c *MockVirtualNetworkClient) GetSubnet(ctx context.Context, id string) (*core.Subnet, error) { if subnet, ok := subnets[id]; ok { return subnet, nil diff --git a/pkg/csi/driver/bv_controller_test.go b/pkg/csi/driver/bv_controller_test.go index 8657121410..2cb52411cb 100644 --- a/pkg/csi/driver/bv_controller_test.go +++ b/pkg/csi/driver/bv_controller_test.go @@ -487,10 +487,26 @@ func (c *MockVirtualNetworkClient) CreatePrivateIp(ctx context.Context, vnicId s return &core.PrivateIp{}, nil } +func (c *MockVirtualNetworkClient) CreatePrivateIpWithRequest(ctx context.Context, request core.CreatePrivateIpRequest) (core.PrivateIp, error) { + return core.PrivateIp{}, nil +} + func (c *MockVirtualNetworkClient) ListPrivateIps(ctx context.Context, id string) ([]core.PrivateIp, error) { return []core.PrivateIp{}, nil } +func (c *MockVirtualNetworkClient) ListIpv6s(ctx context.Context, vnicId string) ([]core.Ipv6, error) { + return []core.Ipv6{}, nil +} + +func (c *MockVirtualNetworkClient) CreateIpv6(ctx context.Context, vnicID string) (*core.Ipv6, error) { + return &core.Ipv6{}, nil +} + +func (c *MockVirtualNetworkClient) CreateIpv6WithRequest(ctx context.Context, request core.CreateIpv6Request) (core.Ipv6, error) { + return core.Ipv6{}, nil +} + func (c *MockVirtualNetworkClient) 
GetSubnet(ctx context.Context, id string) (*core.Subnet, error) { if strings.EqualFold(id, "ocid1.invalid-subnet") { return nil, errors.New("Internal Error.") diff --git a/pkg/flexcidr/flexcidr.go b/pkg/flexcidr/flexcidr.go new file mode 100644 index 0000000000..c2aaac0e42 --- /dev/null +++ b/pkg/flexcidr/flexcidr.go @@ -0,0 +1,447 @@ +// Copyright 2026 Oracle and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flexcidr + +import ( + "context" + "encoding/json" + "fmt" + "math/bits" + "net" + "sync" + "time" + + ociclient "github.com/oracle/oci-cloud-controller-manager/pkg/oci/client" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/core" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/util/retry" +) + +const ( + podCIDRTag = "pod-cidr" + primaryVnicMetadataKey = "flexcidr-primary-vnic" + createTimeout = 55 * time.Second + listTimeout = 25 * time.Second + defaultNamespace = "default" +) + +type IpFamily struct { + IPv4 string + IPv6 string +} + +type PrimaryVnicConfig struct { + IPCount *int `json:"ip-count"` + CIDRBlocks []string `json:"cidr-blocks"` +} + +func ParsePrimaryVnicConfig(instance *core.Instance) (PrimaryVnicConfig, bool) { + v, ok := 
instance.Metadata[primaryVnicMetadataKey] + if !ok { + return PrimaryVnicConfig{}, false + } + + var config PrimaryVnicConfig + if err := json.Unmarshal([]byte(v), &config); err != nil { + return PrimaryVnicConfig{}, false + } + return config, true +} + +func GetClusterIpFamily(ctx context.Context, serviceLister corelisters.ServiceLister) (IpFamily, error) { + svc, err := serviceLister.Services(defaultNamespace).Get("kubernetes") + if err != nil { + return IpFamily{}, err + } + var family IpFamily + ipFamilies := svc.Spec.IPFamilies + if len(ipFamilies) == 0 || len(ipFamilies) > 2 { + return family, fmt.Errorf("IPFamily unset/invalid") + } + for _, ipFamily := range ipFamilies { + if ipFamily == corev1.IPv4Protocol { + family.IPv4 = "IPv4" + } + if ipFamily == corev1.IPv6Protocol { + family.IPv6 = "IPv6" + } + } + return family, nil +} + +func PatchNodePodCIDRs(ctx context.Context, kubeClient kubernetes.Interface, nodeName string, podCIDRs []string, logger *zap.SugaredLogger) error { + if len(podCIDRs) == 0 { + return fmt.Errorf("no PodCIDRs computed") + } + + patchBytes, err := json.Marshal(map[string]any{ + "spec": map[string]any{ + "podCIDR": podCIDRs[0], + "podCIDRs": append([]string(nil), podCIDRs...), + }, + }) + if err != nil { + return err + } + + if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + _, patchErr := kubeClient.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + return patchErr + }); err != nil { + if logger != nil { + logger.Errorf("failed to patch node %s podCIDRs: %v", nodeName, err) + } + return err + } + + if logger != nil { + logger.Infof("successfully patched node %s podCIDRs to %v", nodeName, podCIDRs) + } + return nil +} + +func StringSlicesEqualIgnoreOrder(a, b []string) bool { + if len(a) != len(b) { + return false + } + m := make(map[string]int, len(a)) + for _, s := range a { + m[s]++ + } + for _, s := range b { + m[s]-- + if m[s] < 0 { + return false + } + } + 
for _, v := range m { + if v != 0 { + return false + } + } + return true +} + +func Ipv4PrefixFromCount(ipCount int) (int, error) { + if ipCount <= 0 || (ipCount&(ipCount-1)) != 0 { + return 0, fmt.Errorf("ipCount must be a power of 2, got %d", ipCount) + } + k := bits.Len(uint(ipCount)) - 1 + pfx := 32 - k + if pfx < 18 { + return 0, fmt.Errorf("ipCount=%d yields /%d; OCI requires cidrPrefixLength >= 18 (max ipCount 16384)", ipCount, pfx) + } + if pfx > 30 { + return 0, fmt.Errorf("ipCount=%d yields /%d; must be <= /30 (min ipCount 4)", ipCount, pfx) + } + return pfx, nil +} + +func Ipv6PrefixFromCount(ipCount int) (int, error) { + if ipCount <= 0 || (ipCount&(ipCount-1)) != 0 { + return 0, fmt.Errorf("ipCount must be a power of 2, got %d", ipCount) + } + k := bits.Len(uint(ipCount)) - 1 + pfx := 128 - k + validPfx := pfx / 4 * 4 + if pfx < 80 { + return 0, fmt.Errorf("ipCount=%d yields /%d; must be >= /80 (max ipCount 2^48)", ipCount, pfx) + } + return validPfx, nil +} + +func getCIDRsByFamily(cidrBlocks []string) ([]string, []string, error) { + var ipv4CidrBlocks, ipv6CidrBlocks []string + + for _, cidr := range cidrBlocks { + ip, _, err := net.ParseCIDR(cidr) + if err != nil { + return nil, nil, fmt.Errorf("invalid CIDR block in %s.cidrBlocks: %q: %w", primaryVnicMetadataKey, cidr, err) + } + + if ip.To4() != nil { + ipv4CidrBlocks = append(ipv4CidrBlocks, cidr) + } else { + ipv6CidrBlocks = append(ipv6CidrBlocks, cidr) + } + } + + return ipv4CidrBlocks, ipv6CidrBlocks, nil +} + +func formatIpCidr(ip string, mask *int) string { + if mask == nil { + return ip + } + return fmt.Sprintf("%s/%d", ip, *mask) +} + +type FlexCIDR struct { + Logger *zap.SugaredLogger + PrimaryVnicConfig PrimaryVnicConfig + ClusterIpFamily IpFamily + OciCoreClient ociclient.NetworkingInterface +} + +func (f *FlexCIDR) validateCidrBlocks(ipv4Blocks []string, ipv6Blocks []string) (string, string, error) { + if f.ClusterIpFamily.IPv4 == "" && len(ipv4Blocks) > 0 { + return "", "", 
fmt.Errorf("IPv4 CIDR is not allowed for this cluster but provided: %v", ipv4Blocks) + } + if f.ClusterIpFamily.IPv6 == "" && len(ipv6Blocks) > 0 { + return "", "", fmt.Errorf("IPv6 CIDR is not allowed for this cluster but provided: %v", ipv6Blocks) + } + if len(ipv4Blocks) > 1 || len(ipv6Blocks) > 1 { + return "", "", fmt.Errorf("only one IPv4 CIDR block and one IPv6 CIDR block are allowed for DualStack cluster; found %d IPv4 and %d IPv6 CIDR blocks", len(ipv4Blocks), len(ipv6Blocks)) + } + var ipv4, ipv6 string + if len(ipv4Blocks) == 1 { + ipv4 = ipv4Blocks[0] + } + if len(ipv6Blocks) == 1 { + ipv6 = ipv6Blocks[0] + } + return ipv4, ipv6, nil +} + +func (f *FlexCIDR) getCidrBlocks() (string, string, error) { + if f.Logger != nil { + f.Logger.Infof("PrimaryVnicConfig CIDR blocks: %v", f.PrimaryVnicConfig.CIDRBlocks) + } + ipv4Blocks, ipv6Blocks, err := getCIDRsByFamily(f.PrimaryVnicConfig.CIDRBlocks) + if err != nil { + return "", "", err + } + return f.validateCidrBlocks(ipv4Blocks, ipv6Blocks) +} + +func (f *FlexCIDR) ValidateFlexCidrList(flexCidrs []string) bool { + if len(flexCidrs) == 0 { + if f.Logger != nil { + f.Logger.Errorf("flexCidrs is empty") + } + return false + } + for _, flexCidr := range flexCidrs { + if flexCidr == "" { + if f.Logger != nil { + f.Logger.Errorf("flexCidrs contains empty string") + } + return false + } + } + if f.ClusterIpFamily.IPv4 != "" && f.ClusterIpFamily.IPv6 != "" && len(flexCidrs) != 2 { + if f.Logger != nil { + f.Logger.Errorf("existing flexCidrs %v for dual stack should contain both IPv4 and IPv6 CIDRs", flexCidrs) + } + return false + } + if (f.ClusterIpFamily.IPv4 != "" && f.ClusterIpFamily.IPv6 == "") || (f.ClusterIpFamily.IPv4 == "" && f.ClusterIpFamily.IPv6 != "") { + if len(flexCidrs) != 1 { + if f.Logger != nil { + f.Logger.Errorf("flexCidrs %v is not valid for single stack cluster", flexCidrs) + } + return false + } + } + return true +} + +func (f *FlexCIDR) GetFlexCidrList(primaryVnicID string) ([]string, bool) 
{ + ctx, cancel := context.WithTimeout(context.Background(), listTimeout) + defer cancel() + + var flexCidrs []string + + if f.ClusterIpFamily.IPv4 != "" { + privateIPs, err := f.OciCoreClient.ListPrivateIps(ctx, primaryVnicID) + if err != nil { + if f.Logger != nil { + f.Logger.Errorf("failed to list private IPs for VNIC %s: %v", primaryVnicID, err) + } + return flexCidrs, false + } + + for _, privateIP := range privateIPs { + if _, ok := privateIP.FreeformTags[podCIDRTag]; ok { + flexCidrs = append(flexCidrs, formatIpCidr(*privateIP.IpAddress, privateIP.CidrPrefixLength)) + } + } + } + + if f.ClusterIpFamily.IPv6 != "" { + ipv6s, err := f.OciCoreClient.ListIpv6s(ctx, primaryVnicID) + if err != nil { + if f.Logger != nil { + f.Logger.Errorf("failed to list IPv6s for VNIC %s: %v", primaryVnicID, err) + } + return flexCidrs, false + } + + for _, ipv6 := range ipv6s { + if _, ok := ipv6.FreeformTags[podCIDRTag]; ok { + flexCidrs = append(flexCidrs, formatIpCidr(*ipv6.IpAddress, ipv6.CidrPrefixLength)) + } + } + } + + return flexCidrs, len(flexCidrs) > 0 +} + +func (f *FlexCIDR) CreateFlexCidr(primaryVnicID string, isIPv4 bool, isIPv6 bool) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), createTimeout) + defer cancel() + + flexCidr := "" + + if isIPv4 == isIPv6 { + return "", fmt.Errorf("exactly one IP family must be requested") + } + + ipv4CidrBlock, ipv6CidrBlock, err := f.getCidrBlocks() + if err != nil { + return flexCidr, err + } + + if f.PrimaryVnicConfig.IPCount == nil { + return "", fmt.Errorf("primaryVNIC.ipCount is nil") + } + ipCount := *f.PrimaryVnicConfig.IPCount + + if isIPv4 { + cidrPrefixLength, err := Ipv4PrefixFromCount(ipCount) + if err != nil { + return "", err + } + createPrivateIPDetails := core.CreatePrivateIpDetails{ + VnicId: common.String(primaryVnicID), + CidrPrefixLength: common.Int(cidrPrefixLength), + FreeformTags: map[string]string{podCIDRTag: "true"}, + } + if ipv4CidrBlock != "" { + 
createPrivateIPDetails.Ipv4SubnetCidrAtCreation = common.String(ipv4CidrBlock) + } + privateIP, err := f.OciCoreClient.CreatePrivateIpWithRequest(ctx, core.CreatePrivateIpRequest{ + CreatePrivateIpDetails: createPrivateIPDetails, + }) + if err != nil { + return flexCidr, fmt.Errorf("failed to assign flex CIDR to IPv4 VNIC: %w", err) + } + ipv4Address := *privateIP.IpAddress + parsedIP := net.ParseIP(ipv4Address) + if parsedIP == nil || parsedIP.To4() == nil { + return flexCidr, fmt.Errorf("flex CIDR address (%s) returned by VCN is not a valid IPv4 address", ipv4Address) + } + flexCidr = formatIpCidr(ipv4Address, privateIP.CidrPrefixLength) + } + + if isIPv6 { + cidrPrefixLength, err := Ipv6PrefixFromCount(ipCount) + if err != nil { + return "", err + } + createIPv6Details := core.CreateIpv6Details{ + VnicId: common.String(primaryVnicID), + CidrPrefixLength: common.Int(cidrPrefixLength), + FreeformTags: map[string]string{podCIDRTag: "true"}, + } + if ipv6CidrBlock != "" { + createIPv6Details.Ipv6SubnetCidr = common.String(ipv6CidrBlock) + } + ipv6, err := f.OciCoreClient.CreateIpv6WithRequest(ctx, core.CreateIpv6Request{ + CreateIpv6Details: createIPv6Details, + }) + if err != nil { + return flexCidr, fmt.Errorf("failed to assign flex CIDR to IPv6 VNIC: %w", err) + } + ipv6Address := *ipv6.IpAddress + parsedIP := net.ParseIP(ipv6Address) + if parsedIP == nil || parsedIP.To4() != nil { + return flexCidr, fmt.Errorf("flex CIDR address (%s) returned by VCN is not a valid IPv6 address", ipv6Address) + } + flexCidr = formatIpCidr(ipv6Address, ipv6.CidrPrefixLength) + } + + return flexCidr, nil +} + +func (f *FlexCIDR) GetOrCreateFlexCidrList(primaryVnicID string) ([]string, error) { + var flexCidrs []string + + existingFlexCIDRs, ok := f.GetFlexCidrList(primaryVnicID) + if ok { + if f.Logger != nil { + f.Logger.Infof("flexCidrs %v already exist on primary VNIC %s", existingFlexCIDRs, primaryVnicID) + } + if f.ValidateFlexCidrList(existingFlexCIDRs) { + return 
existingFlexCIDRs, nil + } + return nil, fmt.Errorf("flexCidrs %v is invalid", existingFlexCIDRs) + } + + type createResult struct { + cidr string + err error + } + + var ( + wg sync.WaitGroup + ipv4Result createResult + ipv6Result createResult + ) + + if f.ClusterIpFamily.IPv4 != "" { + wg.Add(1) + go func() { + defer wg.Done() + ipv4Result.cidr, ipv4Result.err = f.CreateFlexCidr(primaryVnicID, true, false) + }() + } + + if f.ClusterIpFamily.IPv6 != "" { + wg.Add(1) + go func() { + defer wg.Done() + ipv6Result.cidr, ipv6Result.err = f.CreateFlexCidr(primaryVnicID, false, true) + }() + } + + wg.Wait() + + if ipv4Result.err != nil { + return nil, ipv4Result.err + } + if ipv6Result.err != nil { + return nil, ipv6Result.err + } + if f.ClusterIpFamily.IPv4 != "" { + flexCidrs = append(flexCidrs, ipv4Result.cidr) + } + if f.ClusterIpFamily.IPv6 != "" { + flexCidrs = append(flexCidrs, ipv6Result.cidr) + } + + if len(flexCidrs) == 0 { + return nil, apierrors.NewBadRequest("no flex CIDRs created") + } + + return flexCidrs, nil +} diff --git a/pkg/flexcidr/flexcidr_test.go b/pkg/flexcidr/flexcidr_test.go new file mode 100644 index 0000000000..525785586a --- /dev/null +++ b/pkg/flexcidr/flexcidr_test.go @@ -0,0 +1,445 @@ +// Copyright 2026 Oracle and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package flexcidr + +import ( + "context" + "testing" + "time" + + ociclient "github.com/oracle/oci-cloud-controller-manager/pkg/oci/client" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/core" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" +) + +type testOciCoreClient struct { + ociclient.NetworkingInterface + + listPrivateIpsResp []core.PrivateIp + listPrivateIpsErr error + listIpv6sResp []core.Ipv6 + listIpv6sErr error + + createPrivateIpResp core.PrivateIp + createPrivateIpErr error + lastCreatePrivate *core.CreatePrivateIpRequest + privateStarted chan struct{} + privateRelease <-chan struct{} + + createIpv6Resp core.Ipv6 + createIpv6Err error + lastCreateIpv6 *core.CreateIpv6Request + ipv6Started chan struct{} + ipv6Release <-chan struct{} +} + +func (m *testOciCoreClient) ListPrivateIps(_ context.Context, _ string) ([]core.PrivateIp, error) { + return m.listPrivateIpsResp, m.listPrivateIpsErr +} + +func (m *testOciCoreClient) ListIpv6s(_ context.Context, _ string) ([]core.Ipv6, error) { + return m.listIpv6sResp, m.listIpv6sErr +} + +func (m *testOciCoreClient) CreatePrivateIpWithRequest(_ context.Context, req core.CreatePrivateIpRequest) (core.PrivateIp, error) { + m.lastCreatePrivate = &req + if m.privateStarted != nil { + close(m.privateStarted) + } + if m.privateRelease != nil { + <-m.privateRelease + } + return m.createPrivateIpResp, m.createPrivateIpErr +} + +func (m *testOciCoreClient) CreateIpv6WithRequest(_ context.Context, req core.CreateIpv6Request) (core.Ipv6, error) { + m.lastCreateIpv6 = &req + if m.ipv6Started != nil { + close(m.ipv6Started) + } + if m.ipv6Release != nil { + <-m.ipv6Release + } + return m.createIpv6Resp, m.createIpv6Err +} + +func testLogger() *zap.SugaredLogger { + return zap.NewNop().Sugar() +} + +func TestParsePrimaryVnicConfig(t 
*testing.T) { + instance := &core.Instance{Metadata: map[string]string{primaryVnicMetadataKey: `{"ip-count":16,"cidr-blocks":["10.0.0.0/24"]}`}} + cfg, ok := ParsePrimaryVnicConfig(instance) + assert.True(t, ok) + if assert.NotNil(t, cfg.IPCount) { + assert.Equal(t, 16, *cfg.IPCount) + } + assert.Equal(t, []string{"10.0.0.0/24"}, cfg.CIDRBlocks) + + _, ok = ParsePrimaryVnicConfig(&core.Instance{Metadata: map[string]string{primaryVnicMetadataKey: `{"ip-count":`}}) + assert.False(t, ok) + + _, ok = ParsePrimaryVnicConfig(&core.Instance{Metadata: map[string]string{"other": "x"}}) + assert.False(t, ok) +} + +func TestGetClusterIpFamily(t *testing.T) { + kubeClient := fake.NewSimpleClientset(&corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "kubernetes", Namespace: defaultNamespace}, + Spec: corev1.ServiceSpec{IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol}}, + }) + factory := informers.NewSharedInformerFactory(kubeClient, 0) + serviceInformer := factory.Core().V1().Services() + assert.NoError(t, serviceInformer.Informer().GetStore().Add(&corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "kubernetes", Namespace: defaultNamespace}, + Spec: corev1.ServiceSpec{IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol}}, + })) + + family, err := GetClusterIpFamily(context.Background(), serviceInformer.Lister()) + assert.NoError(t, err) + assert.Equal(t, IpFamily{IPv4: "IPv4", IPv6: "IPv6"}, family) +} + +func TestPatchNodePodCIDRs(t *testing.T) { + kubeClient := fake.NewSimpleClientset(&corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Labels: map[string]string{"existing": "label"}, + }, + Spec: corev1.NodeSpec{ + ProviderID: "oci://instance", + }, + }) + + err := PatchNodePodCIDRs(context.Background(), kubeClient, "node-1", []string{"10.0.0.0/24", "2001:db8::/80"}, testLogger()) + assert.NoError(t, err) + + updated, err := kubeClient.CoreV1().Nodes().Get(context.Background(), "node-1", metav1.GetOptions{}) + 
assert.NoError(t, err) + assert.Equal(t, "10.0.0.0/24", updated.Spec.PodCIDR) + assert.ElementsMatch(t, []string{"10.0.0.0/24", "2001:db8::/80"}, updated.Spec.PodCIDRs) + assert.Equal(t, "label", updated.Labels["existing"]) + assert.Equal(t, "oci://instance", updated.Spec.ProviderID) +} + +func TestStringSlicesEqualIgnoreOrder(t *testing.T) { + assert.True(t, StringSlicesEqualIgnoreOrder([]string{"a", "b"}, []string{"b", "a"})) + assert.True(t, StringSlicesEqualIgnoreOrder([]string{"a", "a", "b"}, []string{"a", "b", "a"})) + assert.False(t, StringSlicesEqualIgnoreOrder([]string{"a"}, []string{"a", "b"})) + assert.False(t, StringSlicesEqualIgnoreOrder([]string{"a", "b"}, []string{"a", "c"})) +} + +func TestIpv4PrefixFromCount(t *testing.T) { + tests := []struct { + name string + count int + prefix int + errText string + }{ + {name: "valid minimum", count: 4, prefix: 30}, + {name: "valid maximum", count: 16384, prefix: 18}, + {name: "not power of two", count: 3, errText: "power of 2"}, + {name: "too small prefix", count: 32768, errText: "requires cidrPrefixLength >= 18"}, + {name: "too large prefix", count: 2, errText: "must be <= /30"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + prefix, err := Ipv4PrefixFromCount(tt.count) + if tt.errText != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errText) + return + } + assert.NoError(t, err) + assert.Equal(t, tt.prefix, prefix) + }) + } +} + +func TestIpv6PrefixFromCount(t *testing.T) { + tests := []struct { + name string + count int + prefix int + errText string + }{ + {name: "valid rounds to nibble", count: 1024, prefix: 116}, + {name: "valid exact nibble", count: 65536, prefix: 112}, + {name: "not power of two", count: 7, errText: "power of 2"}, + {name: "too small prefix", count: 1 << 49, errText: "must be >= /80"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + prefix, err := Ipv6PrefixFromCount(tt.count) + if tt.errText != "" { + 
assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errText) + return + } + assert.NoError(t, err) + assert.Equal(t, tt.prefix, prefix) + }) + } +} + +func TestGetCIDRsByFamily(t *testing.T) { + ipv4, ipv6, err := getCIDRsByFamily([]string{"10.0.0.0/24", "2001:db8::/64"}) + assert.NoError(t, err) + assert.Equal(t, []string{"10.0.0.0/24"}, ipv4) + assert.Equal(t, []string{"2001:db8::/64"}, ipv6) + + _, _, err = getCIDRsByFamily([]string{"invalid"}) + assert.Error(t, err) +} + +func TestValidateCidrBlocks(t *testing.T) { + f := &FlexCIDR{Logger: testLogger(), ClusterIpFamily: IpFamily{IPv4: "IPv4", IPv6: "IPv6"}} + ipv4, ipv6, err := f.validateCidrBlocks([]string{"10.0.0.0/24"}, []string{"2001:db8::/64"}) + assert.NoError(t, err) + assert.Equal(t, "10.0.0.0/24", ipv4) + assert.Equal(t, "2001:db8::/64", ipv6) + + f = &FlexCIDR{Logger: testLogger(), ClusterIpFamily: IpFamily{IPv4: "IPv4"}} + _, _, err = f.validateCidrBlocks(nil, []string{"2001:db8::/64"}) + assert.Error(t, err) + + f = &FlexCIDR{Logger: testLogger(), ClusterIpFamily: IpFamily{IPv4: "IPv4", IPv6: "IPv6"}} + _, _, err = f.validateCidrBlocks([]string{"10.0.0.0/24", "10.1.0.0/24"}, nil) + assert.Error(t, err) +} + +func TestValidateFlexCidrList(t *testing.T) { + fDual := &FlexCIDR{Logger: testLogger(), ClusterIpFamily: IpFamily{IPv4: "IPv4", IPv6: "IPv6"}} + assert.True(t, fDual.ValidateFlexCidrList([]string{"10.0.0.1/30", "2001:db8::1/116"})) + assert.False(t, fDual.ValidateFlexCidrList([]string{"10.0.0.1/30"})) + + fSingle := &FlexCIDR{Logger: testLogger(), ClusterIpFamily: IpFamily{IPv4: "IPv4"}} + assert.True(t, fSingle.ValidateFlexCidrList([]string{"10.0.0.1/30"})) + assert.False(t, fSingle.ValidateFlexCidrList([]string{"10.0.0.1/30", "10.0.0.2/30"})) + assert.False(t, fSingle.ValidateFlexCidrList([]string{""})) +} + +func TestGetFlexCidrList(t *testing.T) { + pfx4 := 30 + pfx6 := 116 + fakeClient := &testOciCoreClient{ + listPrivateIpsResp: []core.PrivateIp{ + {IpAddress: 
common.String("10.0.0.10"), CidrPrefixLength: &pfx4, FreeformTags: map[string]string{podCIDRTag: "true"}}, + {IpAddress: common.String("10.0.0.11"), CidrPrefixLength: &pfx4, FreeformTags: map[string]string{"other": "x"}}, + }, + listIpv6sResp: []core.Ipv6{ + {IpAddress: common.String("2001:db8::10"), CidrPrefixLength: &pfx6, FreeformTags: map[string]string{podCIDRTag: "true"}}, + }, + } + f := &FlexCIDR{Logger: testLogger(), ClusterIpFamily: IpFamily{IPv4: "IPv4", IPv6: "IPv6"}, OciCoreClient: fakeClient} + + cidrs, ok := f.GetFlexCidrList("vnic") + assert.True(t, ok) + assert.ElementsMatch(t, []string{"10.0.0.10/30", "2001:db8::10/116"}, cidrs) +} + +func TestCreateFlexCidrIPv4ConfiguresSubnetCidrWhenSet(t *testing.T) { + pfx := 22 + fakeClient := &testOciCoreClient{createPrivateIpResp: core.PrivateIp{IpAddress: common.String("10.0.1.5"), CidrPrefixLength: &pfx}} + ipCount := 1024 + f := &FlexCIDR{ + Logger: testLogger(), + PrimaryVnicConfig: PrimaryVnicConfig{IPCount: &ipCount, CIDRBlocks: []string{"10.0.0.0/24"}}, + ClusterIpFamily: IpFamily{IPv4: "IPv4"}, + OciCoreClient: fakeClient, + } + + cidr, err := f.CreateFlexCidr("vnic", true, false) + assert.NoError(t, err) + assert.Equal(t, "10.0.1.5/22", cidr) + if assert.NotNil(t, fakeClient.lastCreatePrivate) && assert.NotNil(t, fakeClient.lastCreatePrivate.CreatePrivateIpDetails.Ipv4SubnetCidrAtCreation) { + assert.Equal(t, "10.0.0.0/24", *fakeClient.lastCreatePrivate.CreatePrivateIpDetails.Ipv4SubnetCidrAtCreation) + } +} + +func TestCreateFlexCidrIPv6ConfiguresSubnetCidrWhenSet(t *testing.T) { + pfx := 116 + fakeClient := &testOciCoreClient{createIpv6Resp: core.Ipv6{IpAddress: common.String("2001:db8::5"), CidrPrefixLength: &pfx}} + ipCount := 1024 + f := &FlexCIDR{ + Logger: testLogger(), + PrimaryVnicConfig: PrimaryVnicConfig{IPCount: &ipCount, CIDRBlocks: []string{"2001:db8:1::/64"}}, + ClusterIpFamily: IpFamily{IPv6: "IPv6"}, + OciCoreClient: fakeClient, + } + + cidr, err := f.CreateFlexCidr("vnic", false, 
true) + assert.NoError(t, err) + assert.Equal(t, "2001:db8::5/116", cidr) + if assert.NotNil(t, fakeClient.lastCreateIpv6) && assert.NotNil(t, fakeClient.lastCreateIpv6.CreateIpv6Details.Ipv6SubnetCidr) { + assert.Equal(t, "2001:db8:1::/64", *fakeClient.lastCreateIpv6.CreateIpv6Details.Ipv6SubnetCidr) + } +} + +func TestCreateFlexCidrIPv6DoesNotConfigureSubnetCidrWhenUnset(t *testing.T) { + pfx := 116 + fakeClient := &testOciCoreClient{createIpv6Resp: core.Ipv6{IpAddress: common.String("2001:db8::6"), CidrPrefixLength: &pfx}} + ipCount := 1024 + f := &FlexCIDR{ + Logger: testLogger(), + PrimaryVnicConfig: PrimaryVnicConfig{IPCount: &ipCount}, + ClusterIpFamily: IpFamily{IPv6: "IPv6"}, + OciCoreClient: fakeClient, + } + + cidr, err := f.CreateFlexCidr("vnic", false, true) + assert.NoError(t, err) + assert.Equal(t, "2001:db8::6/116", cidr) + if assert.NotNil(t, fakeClient.lastCreateIpv6) { + assert.Nil(t, fakeClient.lastCreateIpv6.CreateIpv6Details.Ipv6SubnetCidr) + } +} + +func TestCreateFlexCidrRejectsInvalidFamilySelection(t *testing.T) { + ipCount := 1024 + f := &FlexCIDR{ + Logger: testLogger(), + PrimaryVnicConfig: PrimaryVnicConfig{IPCount: &ipCount}, + OciCoreClient: &testOciCoreClient{}, + } + + _, err := f.CreateFlexCidr("vnic", false, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "exactly one IP family") + + _, err = f.CreateFlexCidr("vnic", true, true) + assert.Error(t, err) + assert.Contains(t, err.Error(), "exactly one IP family") +} + +func TestGetOrCreateFlexCidrList(t *testing.T) { + ipCount := 1024 + pfx4 := 22 + pfx6 := 116 + fakeClient := &testOciCoreClient{ + createPrivateIpResp: core.PrivateIp{IpAddress: common.String("10.0.1.5"), CidrPrefixLength: &pfx4}, + createIpv6Resp: core.Ipv6{IpAddress: common.String("2001:db8::7"), CidrPrefixLength: &pfx6}, + } + f := &FlexCIDR{ + Logger: testLogger(), + PrimaryVnicConfig: PrimaryVnicConfig{IPCount: &ipCount}, + ClusterIpFamily: IpFamily{IPv4: "IPv4", IPv6: "IPv6"}, + OciCoreClient: 
fakeClient, + } + + cidrs, err := f.GetOrCreateFlexCidrList("vnic") + assert.NoError(t, err) + assert.ElementsMatch(t, []string{"10.0.1.5/22", "2001:db8::7/116"}, cidrs) +} + +func TestGetOrCreateFlexCidrListCreatesFamiliesInParallel(t *testing.T) { + ipCount := 1024 + pfx4 := 22 + pfx6 := 116 + privateStarted := make(chan struct{}) + ipv6Started := make(chan struct{}) + privateRelease := make(chan struct{}) + ipv6Release := make(chan struct{}) + fakeClient := &testOciCoreClient{ + createPrivateIpResp: core.PrivateIp{IpAddress: common.String("10.0.1.5"), CidrPrefixLength: &pfx4}, + createIpv6Resp: core.Ipv6{IpAddress: common.String("2001:db8::7"), CidrPrefixLength: &pfx6}, + privateStarted: privateStarted, + privateRelease: privateRelease, + ipv6Started: ipv6Started, + ipv6Release: ipv6Release, + } + f := &FlexCIDR{ + Logger: testLogger(), + PrimaryVnicConfig: PrimaryVnicConfig{IPCount: &ipCount}, + ClusterIpFamily: IpFamily{IPv4: "IPv4", IPv6: "IPv6"}, + OciCoreClient: fakeClient, + } + + done := make(chan struct{}) + var ( + cidrs []string + err error + ) + go func() { + cidrs, err = f.GetOrCreateFlexCidrList("vnic") + close(done) + }() + + select { + case <-privateStarted: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for IPv4 flex CIDR creation to start") + } + + select { + case <-ipv6Started: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for IPv6 flex CIDR creation to start") + } + + close(privateRelease) + close(ipv6Release) + + select { + case <-done: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for parallel flex CIDR creation to finish") + } + + assert.NoError(t, err) + assert.Equal(t, []string{"10.0.1.5/22", "2001:db8::7/116"}, cidrs) +} + +func TestFormatIpCidr(t *testing.T) { + pfx := 24 + assert.Equal(t, "10.0.0.1/24", formatIpCidr("10.0.0.1", &pfx)) + assert.Equal(t, "10.0.0.1", formatIpCidr("10.0.0.1", nil)) +} + +func TestGetOrCreateFlexCidrListRejectsInvalidExistingCIDRs(t *testing.T) { 
+ pfx4 := 30 + fakeClient := &testOciCoreClient{ + listPrivateIpsResp: []core.PrivateIp{ + {IpAddress: common.String("10.0.0.10"), CidrPrefixLength: &pfx4, FreeformTags: map[string]string{podCIDRTag: "true"}}, + }, + } + f := &FlexCIDR{ + Logger: testLogger(), + ClusterIpFamily: IpFamily{IPv4: "IPv4", IPv6: "IPv6"}, + OciCoreClient: fakeClient, + } + + _, err := f.GetOrCreateFlexCidrList("vnic") + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid") +} + +func TestParsePrimaryVnicConfigCIDRBlocksOrderIndependent(t *testing.T) { + instance := &core.Instance{Metadata: map[string]string{primaryVnicMetadataKey: `{"ip-count":16,"cidr-blocks":["2001:db8::/64","10.0.0.0/24"]}`}} + cfg, ok := ParsePrimaryVnicConfig(instance) + assert.True(t, ok) + if assert.NotNil(t, cfg.IPCount) { + assert.Equal(t, 16, *cfg.IPCount) + } + assert.Equal(t, []string{"2001:db8::/64", "10.0.0.0/24"}, cfg.CIDRBlocks) +} diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index cb44b1ec67..92a9f03cb3 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -88,6 +88,8 @@ type virtualNetworkClient interface { GetPrivateIp(ctx context.Context, request core.GetPrivateIpRequest) (response core.GetPrivateIpResponse, err error) ListPrivateIps(ctx context.Context, request core.ListPrivateIpsRequest) (response core.ListPrivateIpsResponse, err error) CreatePrivateIp(ctx context.Context, request core.CreatePrivateIpRequest) (response core.CreatePrivateIpResponse, err error) + ListIpv6s(ctx context.Context, request core.ListIpv6sRequest) (response core.ListIpv6sResponse, err error) + CreateIpv6(ctx context.Context, request core.CreateIpv6Request) (response core.CreateIpv6Response, err error) GetPublicIpByIpAddress(ctx context.Context, request core.GetPublicIpByIpAddressRequest) (response core.GetPublicIpByIpAddressResponse, err error) GetIpv6(ctx context.Context, request core.GetIpv6Request) (response core.GetIpv6Response, err error) @@ -193,7 +195,7 @@ type client 
struct { bs blockstorageClient identity identityClient //compartment compartmentClient - lustre lustrefilestorage.LustreFileStorageClient + lustre lustrefilestorage.LustreFileStorageClient requestMetadata common.RequestMetadata rateLimiter RateLimiter @@ -311,7 +313,7 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit bs: &bs, filestorage: &fss, //compartment: &compartment, - lustre: lustreClient, + lustre: lustreClient, rateLimiter: *opRateLimiter, requestMetadata: requestMetadata, diff --git a/pkg/oci/client/client_test.go b/pkg/oci/client/client_test.go index 47fe1295e0..b741085216 100644 --- a/pkg/oci/client/client_test.go +++ b/pkg/oci/client/client_test.go @@ -239,6 +239,14 @@ func (c *mockVirtualNetworkClient) CreatePrivateIp(ctx context.Context, request return core.CreatePrivateIpResponse{}, nil } +func (c *mockVirtualNetworkClient) ListIpv6s(ctx context.Context, request core.ListIpv6sRequest) (response core.ListIpv6sResponse, err error) { + return core.ListIpv6sResponse{}, nil +} + +func (c *mockVirtualNetworkClient) CreateIpv6(ctx context.Context, request core.CreateIpv6Request) (response core.CreateIpv6Response, err error) { + return core.CreateIpv6Response{}, nil +} + func (c *mockVirtualNetworkClient) GetIpv6(ctx context.Context, request core.GetIpv6Request) (response core.GetIpv6Response, err error) { return core.GetIpv6Response{}, nil } diff --git a/pkg/oci/client/errors.go b/pkg/oci/client/errors.go index 7183c5c50e..c84d50da67 100644 --- a/pkg/oci/client/errors.go +++ b/pkg/oci/client/errors.go @@ -16,9 +16,12 @@ package client import ( "context" + "fmt" "math" + "math/rand" "net/http" "regexp" + "sync" "time" "github.com/oracle/oci-go-sdk/v65/common" @@ -27,6 +30,18 @@ import ( ) var errNotFound = errors.New("not found") +var rateLimitRetryRNG = rand.New(rand.NewSource(time.Now().UnixNano())) +var rateLimitRetryRNGMu sync.Mutex + +var rateLimitRetryMaxAttempts uint = 6 + +var rateLimitRetryNextDuration = 
func(attempt uint) time.Duration { + base := math.Pow(2, float64(attempt-1)) + rateLimitRetryRNGMu.Lock() + jitter := 1 + (rateLimitRetryRNG.Float64()-0.5)*0.2 + rateLimitRetryRNGMu.Unlock() + return time.Duration(base * jitter * float64(time.Second)) +} /* Addition of system tags can fail due to permission issue while API returns error code: RelatedResourceNotAuthorizedOrNotFound & @@ -97,6 +112,46 @@ func RateLimitError(isWrite bool, opName string) error { return errors.Errorf("rate limited(%s) for operation: %s", opType, opName) } +func isRateLimitError(err error) bool { + if err == nil { + return false + } + + serviceErr, ok := common.IsServiceError(errors.Cause(err)) + return ok && serviceErr.GetHTTPStatusCode() == http.StatusTooManyRequests +} + +func runWithRateLimitRetry(ctx context.Context, logger *zap.SugaredLogger, operation string, fn func(context.Context) error) error { + var lastErr error + + for attempt := uint(1); rateLimitRetryMaxAttempts == 0 || attempt <= rateLimitRetryMaxAttempts; attempt++ { + opErr := fn(ctx) + lastErr = opErr + if !isRateLimitError(opErr) { + return opErr + } + + backoff := rateLimitRetryNextDuration(attempt) + if logger != nil { + logger.Warnf("%s hit rate limit on attempt %d, retrying in %s", operation, attempt, backoff) + } + if deadline, ok := ctx.Deadline(); ok && time.Now().Add(backoff).After(deadline) { + return fmt.Errorf("%s retry exceeded context deadline: %w", operation, context.DeadlineExceeded) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(backoff): + } + } + + if lastErr == nil { + lastErr = fmt.Errorf("operation %s reached retry limit", operation) + } + return fmt.Errorf("%s retry exceeded maximum attempts: %w", operation, lastErr) +} + func newRetryPolicy() *common.RetryPolicy { return NewRetryPolicyWithMaxAttempts(uint(2)) } diff --git a/pkg/oci/client/networking.go b/pkg/oci/client/networking.go index 722914fd74..d18c3a111a 100644 --- a/pkg/oci/client/networking.go +++ 
b/pkg/oci/client/networking.go @@ -41,7 +41,11 @@ type NetworkingInterface interface { ListPrivateIps(ctx context.Context, vnicId string) ([]core.PrivateIp, error) GetPrivateIp(ctx context.Context, id string) (*core.PrivateIp, error) CreatePrivateIp(ctx context.Context, vnicID string) (*core.PrivateIp, error) + CreatePrivateIpWithRequest(ctx context.Context, request core.CreatePrivateIpRequest) (core.PrivateIp, error) GetIpv6(ctx context.Context, id string) (*core.Ipv6, error) + ListIpv6s(ctx context.Context, vnicId string) ([]core.Ipv6, error) + CreateIpv6(ctx context.Context, vnicID string) (*core.Ipv6, error) + CreateIpv6WithRequest(ctx context.Context, request core.CreateIpv6Request) (core.Ipv6, error) GetPublicIpByIpAddress(ctx context.Context, id string) (*core.PublicIp, error) @@ -301,23 +305,40 @@ func (c *client) ListPrivateIps(ctx context.Context, vnicId string) ([]core.Priv } func (c *client) CreatePrivateIp(ctx context.Context, vnicId string) (*core.PrivateIp, error) { - if !c.rateLimiter.Writer.TryAccept() { - return nil, RateLimitError(false, "CreatePrivateIp") - } - requestMetadata := getDefaultRequestMetadata(c.requestMetadata) - resp, err := c.network.CreatePrivateIp(ctx, core.CreatePrivateIpRequest{ + privateIP, err := c.CreatePrivateIpWithRequest(ctx, core.CreatePrivateIpRequest{ CreatePrivateIpDetails: core.CreatePrivateIpDetails{ VnicId: &vnicId, }, - RequestMetadata: requestMetadata, }) - incRequestCounter(err, createVerb, privateIPResource) if err != nil { - c.logger.With(vnicId).Infof("CreatePrivateIp failed %s", pointer.StringDeref(resp.OpcRequestId, "")) - return nil, errors.WithStack(err) + return nil, err } + return &privateIP, nil +} - return &resp.PrivateIp, nil +func (c *client) CreatePrivateIpWithRequest(ctx context.Context, request core.CreatePrivateIpRequest) (core.PrivateIp, error) { + var privateIP core.PrivateIp + err := runWithRateLimitRetry(ctx, c.logger, "CreatePrivateIp", func(ctx context.Context) error { + if 
!c.rateLimiter.Writer.TryAccept() { + return RateLimitError(false, "CreatePrivateIp") + } + + request.RequestMetadata = getDefaultRequestMetadata(c.requestMetadata) + resp, err := c.network.CreatePrivateIp(ctx, request) + incRequestCounter(err, createVerb, privateIPResource) + if err != nil { + c.logger.Infof("CreatePrivateIp failed %s", pointer.StringDeref(resp.OpcRequestId, "")) + return errors.WithStack(err) + } + + privateIP = resp.PrivateIp + return nil + }) + if err != nil { + return core.PrivateIp{}, err + } + + return privateIP, nil } func (c *client) GetIpv6(ctx context.Context, id string) (*core.Ipv6, error) { @@ -343,6 +364,69 @@ func (c *client) GetIpv6(ctx context.Context, id string) (*core.Ipv6, error) { return &resp.Ipv6, nil } +func (c *client) ListIpv6s(ctx context.Context, vnicId string) ([]core.Ipv6, error) { + var page *string + ipv6s := []core.Ipv6{} + for { + if !c.rateLimiter.Reader.TryAccept() { + return nil, RateLimitError(false, "ListIpv6s") + } + resp, err := c.network.ListIpv6s(ctx, core.ListIpv6sRequest{ + VnicId: &vnicId, + Page: page, + RequestMetadata: c.requestMetadata, + }) + incRequestCounter(err, listVerb, ipv6IPResource) + if err != nil { + c.logger.With(vnicId).Infof("ListIpv6s failed %s", pointer.StringDeref(resp.OpcRequestId, "")) + return nil, errors.WithStack(err) + } + ipv6s = append(ipv6s, resp.Items...) 
+ if page = resp.OpcNextPage; page == nil { + break + } + } + + return ipv6s, nil +} + +func (c *client) CreateIpv6(ctx context.Context, vnicId string) (*core.Ipv6, error) { + ipv6, err := c.CreateIpv6WithRequest(ctx, core.CreateIpv6Request{ + CreateIpv6Details: core.CreateIpv6Details{ + VnicId: &vnicId, + }, + }) + if err != nil { + return nil, err + } + return &ipv6, nil +} + +func (c *client) CreateIpv6WithRequest(ctx context.Context, request core.CreateIpv6Request) (core.Ipv6, error) { + var ipv6 core.Ipv6 + err := runWithRateLimitRetry(ctx, c.logger, "CreateIpv6", func(ctx context.Context) error { + if !c.rateLimiter.Writer.TryAccept() { + return RateLimitError(false, "CreateIpv6") + } + + request.RequestMetadata = getDefaultRequestMetadata(c.requestMetadata) + resp, err := c.network.CreateIpv6(ctx, request) + incRequestCounter(err, createVerb, ipv6IPResource) + if err != nil { + c.logger.Infof("CreateIpv6 failed %s", pointer.StringDeref(resp.OpcRequestId, "")) + return errors.WithStack(err) + } + + ipv6 = resp.Ipv6 + return nil + }) + if err != nil { + return core.Ipv6{}, err + } + + return ipv6, nil +} + func (c *client) CreateNetworkSecurityGroup(ctx context.Context, compartmentId, vcnId, displayName, serviceUid string) (*core.NetworkSecurityGroup, error) { if !c.rateLimiter.Writer.TryAccept() { return nil, RateLimitError(false, "CreateNetworkSecurityGroup") diff --git a/pkg/oci/client/networking_test.go b/pkg/oci/client/networking_test.go index 7d4cfe2f45..ee12daeaee 100644 --- a/pkg/oci/client/networking_test.go +++ b/pkg/oci/client/networking_test.go @@ -1,13 +1,17 @@ package client import ( + "context" + "net/http" "reflect" "testing" "time" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/core" + "go.uber.org/zap" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/flowcontrol" ) var ( @@ -140,3 +144,124 @@ func Test_client_GetSubnetFromCacheByIP(t *testing.T) { }) } } + +type retryingVirtualNetworkClient struct { + 
mockVirtualNetworkClient + + createPrivateIpCalls int + createIpv6Calls int +} + +func (c *retryingVirtualNetworkClient) CreatePrivateIp(ctx context.Context, request core.CreatePrivateIpRequest) (core.CreatePrivateIpResponse, error) { + c.createPrivateIpCalls++ + if c.createPrivateIpCalls == 1 { + return core.CreatePrivateIpResponse{}, mockServiceError{ + StatusCode: http.StatusTooManyRequests, + Code: HTTP429TooManyRequestsCode, + Message: "rate limited", + } + } + + return core.CreatePrivateIpResponse{ + PrivateIp: core.PrivateIp{ + Id: common.String("private-ip-id"), + }, + }, nil +} + +func (c *retryingVirtualNetworkClient) CreateIpv6(ctx context.Context, request core.CreateIpv6Request) (core.CreateIpv6Response, error) { + c.createIpv6Calls++ + if c.createIpv6Calls == 1 { + return core.CreateIpv6Response{}, mockServiceError{ + StatusCode: http.StatusTooManyRequests, + Code: HTTP429TooManyRequestsCode, + Message: "rate limited", + } + } + + return core.CreateIpv6Response{ + Ipv6: core.Ipv6{ + Id: common.String("ipv6-id"), + }, + }, nil +} + +func TestCreatePrivateIpWithRequestRetriesRateLimit(t *testing.T) { + originalMaxAttempts := rateLimitRetryMaxAttempts + originalNextDuration := rateLimitRetryNextDuration + rateLimitRetryMaxAttempts = 2 + rateLimitRetryNextDuration = func(attempt uint) time.Duration { return 0 } + defer func() { + rateLimitRetryMaxAttempts = originalMaxAttempts + rateLimitRetryNextDuration = originalNextDuration + }() + + network := &retryingVirtualNetworkClient{} + c := &client{ + network: network, + logger: zap.NewNop().Sugar(), + rateLimiter: RateLimiter{ + Reader: flowcontrol.NewFakeAlwaysRateLimiter(), + Writer: flowcontrol.NewFakeAlwaysRateLimiter(), + }, + } + + privateIP, err := c.CreatePrivateIpWithRequest(context.Background(), core.CreatePrivateIpRequest{ + CreatePrivateIpDetails: core.CreatePrivateIpDetails{ + VnicId: common.String("vnic-id"), + }, + }) + if err != nil { + t.Fatalf("CreatePrivateIpWithRequest() error = %v", err) 
+ } + if network.createPrivateIpCalls != 2 { + t.Fatalf("CreatePrivateIpWithRequest() calls = %d, want 2", network.createPrivateIpCalls) + } + if privateIP.Id == nil || *privateIP.Id != "private-ip-id" { + got := "" + if privateIP.Id != nil { + got = *privateIP.Id + } + t.Fatalf("CreatePrivateIpWithRequest() private IP id = %q, want %q", got, "private-ip-id") + } +} + +func TestCreateIpv6WithRequestRetriesRateLimit(t *testing.T) { + originalMaxAttempts := rateLimitRetryMaxAttempts + originalNextDuration := rateLimitRetryNextDuration + rateLimitRetryMaxAttempts = 2 + rateLimitRetryNextDuration = func(attempt uint) time.Duration { return 0 } + defer func() { + rateLimitRetryMaxAttempts = originalMaxAttempts + rateLimitRetryNextDuration = originalNextDuration + }() + + network := &retryingVirtualNetworkClient{} + c := &client{ + network: network, + logger: zap.NewNop().Sugar(), + rateLimiter: RateLimiter{ + Reader: flowcontrol.NewFakeAlwaysRateLimiter(), + Writer: flowcontrol.NewFakeAlwaysRateLimiter(), + }, + } + + ipv6, err := c.CreateIpv6WithRequest(context.Background(), core.CreateIpv6Request{ + CreateIpv6Details: core.CreateIpv6Details{ + VnicId: common.String("vnic-id"), + }, + }) + if err != nil { + t.Fatalf("CreateIpv6WithRequest() error = %v", err) + } + if network.createIpv6Calls != 2 { + t.Fatalf("CreateIpv6WithRequest() calls = %d, want 2", network.createIpv6Calls) + } + if ipv6.Id == nil || *ipv6.Id != "ipv6-id" { + got := "" + if ipv6.Id != nil { + got = *ipv6.Id + } + t.Fatalf("CreateIpv6WithRequest() IPv6 id = %q, want %q", got, "ipv6-id") + } +} diff --git a/pkg/volume/provisioner/block/block_test.go b/pkg/volume/provisioner/block/block_test.go index 1c9882350b..57f62ca1bd 100644 --- a/pkg/volume/provisioner/block/block_test.go +++ b/pkg/volume/provisioner/block/block_test.go @@ -344,9 +344,26 @@ func (c *MockVirtualNetworkClient) ListPrivateIps(ctx context.Context, id string func (c *MockVirtualNetworkClient) CreatePrivateIp(ctx context.Context, 
vnicId string) (*core.PrivateIp, error) { return &core.PrivateIp{}, nil } + +func (c *MockVirtualNetworkClient) CreatePrivateIpWithRequest(ctx context.Context, request core.CreatePrivateIpRequest) (core.PrivateIp, error) { + return core.PrivateIp{}, nil +} + func (c *MockVirtualNetworkClient) GetIpv6(ctx context.Context, id string) (*core.Ipv6, error) { return &core.Ipv6{}, nil } + +func (c *MockVirtualNetworkClient) ListIpv6s(ctx context.Context, vnicId string) ([]core.Ipv6, error) { + return []core.Ipv6{}, nil +} + +func (c *MockVirtualNetworkClient) CreateIpv6(ctx context.Context, vnicID string) (*core.Ipv6, error) { + return &core.Ipv6{}, nil +} + +func (c *MockVirtualNetworkClient) CreateIpv6WithRequest(ctx context.Context, request core.CreateIpv6Request) (core.Ipv6, error) { + return core.Ipv6{}, nil +} func (c *MockVirtualNetworkClient) GetSubnet(ctx context.Context, id string) (*core.Subnet, error) { return nil, nil } diff --git a/pkg/volume/provisioner/fss/fss_test.go b/pkg/volume/provisioner/fss/fss_test.go index c3b464fbc6..5244fbd310 100644 --- a/pkg/volume/provisioner/fss/fss_test.go +++ b/pkg/volume/provisioner/fss/fss_test.go @@ -345,6 +345,10 @@ func (c *MockVirtualNetworkClient) CreatePrivateIp(ctx context.Context, vnicId s return &core.PrivateIp{}, nil } +func (c *MockVirtualNetworkClient) CreatePrivateIpWithRequest(ctx context.Context, request core.CreatePrivateIpRequest) (core.PrivateIp, error) { + return core.PrivateIp{}, nil +} + func (c *MockVirtualNetworkClient) ListIpv6s(ctx context.Context, vnicId string) ([]core.Ipv6, error) { return []core.Ipv6{}, nil } @@ -353,6 +357,10 @@ func (c *MockVirtualNetworkClient) CreateIpv6(ctx context.Context, vnicID string return &core.Ipv6{}, nil } +func (c *MockVirtualNetworkClient) CreateIpv6WithRequest(ctx context.Context, request core.CreateIpv6Request) (core.Ipv6, error) { + return core.Ipv6{}, nil +} + func (c *MockVirtualNetworkClient) GetSubnet(ctx context.Context, id string) (*core.Subnet, error) 
{ return nil, nil }