From 9facb9c8951f4d0627f82bf382fd64f089d7281d Mon Sep 17 00:00:00 2001
From: Ross Kirkpatrick
Date: Tue, 19 Nov 2024 15:02:26 -0500
Subject: [PATCH] revert service ns suffix fallback, update docs

Signed-off-by: Ross Kirkpatrick
---
 README.md                                 |  6 +-
 cloud/linode/cilium_loadbalancers.go      | 20 +++---
 cloud/linode/cilium_loadbalancers_test.go | 80 ++++++++++++++++++++---
 cloud/linode/loadbalancers.go             | 11 +---
 4 files changed, 85 insertions(+), 32 deletions(-)

diff --git a/README.md b/README.md
index 7619b9e0..f23e6d34 100644
--- a/README.md
+++ b/README.md
@@ -87,7 +87,11 @@ These configuration options can be specified via the `port-*` annotation, encode
 #### Shared IP Load-Balancing
 **NOTE:** This feature requires contacting [Customer Support](https://www.linode.com/support/contact/) to enable provisioning additional IPs.
 
-Services of `type: LoadBalancer` can receive an external IP not backed by a NodeBalancer if `--bgp-node-selector` is set on the Linode CCM and `--load-balancer-type` is set to `cilium-bgp`. Additionally, if you plan to run multiple clusters within a single API Region, setting `--ip-holder-suffix` on the Linode CCM to a unique value per cluster will create an ip-holder nanode for every cluster created within that API Region.
+Services of `type: LoadBalancer` can receive an external IP not backed by a NodeBalancer if `--bgp-node-selector` is set on the Linode CCM and `--load-balancer-type` is set to `cilium-bgp`.
+
+If you plan to run multiple clusters within a single API Region, setting `--ip-holder-suffix` on the Linode CCM to a unique value per cluster will create an ip-holder nanode for each cluster created within that API Region (ex. `linode-ccm-ip-holder-<region>-<suffix>`).
+
+If you do not set `--ip-holder-suffix` on the Linode CCM, the suffix is omitted from the ip-holder nanode's name (ex. `linode-ccm-ip-holder-<region>`).
 
 This feature requires the Kubernetes cluster to be using [Cilium](https://cilium.io/) as the CNI with the `bgp-control-plane` feature enabled.
 
diff --git a/cloud/linode/cilium_loadbalancers.go b/cloud/linode/cilium_loadbalancers.go
index 58c80f52..34684697 100644
--- a/cloud/linode/cilium_loadbalancers.go
+++ b/cloud/linode/cilium_loadbalancers.go
@@ -282,11 +282,6 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service)
 	if Options.IpHolderSuffix != "" {
 		ipHolderSuffix = Options.IpHolderSuffix
 		klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
-	} else {
-		if service.Namespace != "" {
-			ipHolderSuffix = service.Namespace
-			klog.V(3).Infof("using service-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
-		}
 	}
 
 	ipHolder, err := l.getIPHolder(ctx, ipHolderSuffix)
@@ -391,18 +386,21 @@ func (l *loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linode
 
 // generateClusterScopedIPHolderLinodeName attempts to generate a unique name for the IP Holder
 // instance used alongside Cilium LoadBalancers and Shared IPs for Kubernetes Services.
-// The `suffix` is set to either value of the `--ip-holder-suffix` parameter, if it is set.
-// The backup method involves keying off the service namespace.
-// While it _is_ possible to have an empty suffix passed in, it would mean that kubernetes
-// allowed the creation of a service without a namespace, which is highly improbable.
+// If the `--ip-holder-suffix` flag is passed when running the Linode CCM, `suffix` is set to its value.
 // N.B. This function uses SafeConcatName to ensure that the label is <= 63 characters.
 // This may result in a hash of the suffix value being used instead of the actual suffix value
 // that was passed to this function if the length of the suffix exceeds a certain character limit.
-func generateClusterScopedIPHolderLinodeName(zone, suffix string) string {
+func generateClusterScopedIPHolderLinodeName(zone, suffix string) (label string) {
 	// since Linode CCM consumers are varied, we require a method of providing a
 	// suffix that does not rely on the use of a specific product (ex. LKE) to
 	// have a specific piece of metadata (ex. annotation(s), label(s) ) present to key off of.
-	label := SafeConcatName(DNS1123LabelMaxLength, ipHolderLabelPrefix, zone, suffix)
+
+	if suffix == "" {
+		// this avoids a double hyphen if suffix is empty (ex. linode-ccm-ip-holder--us-ord)
+		label = SafeConcatName(DNS1123LabelMaxLength, ipHolderLabelPrefix, zone)
+	} else {
+		label = SafeConcatName(DNS1123LabelMaxLength, ipHolderLabelPrefix, zone, suffix)
+	}
 	klog.V(5).Infof("generated IP Holder Linode label: %s", label)
 	return label
 }
diff --git a/cloud/linode/cilium_loadbalancers_test.go b/cloud/linode/cilium_loadbalancers_test.go
index e4991bfd..1fdace1c 100644
--- a/cloud/linode/cilium_loadbalancers_test.go
+++ b/cloud/linode/cilium_loadbalancers_test.go
@@ -77,13 +77,7 @@ var (
 		Region: "us-west",
 		IPv4:   []*net.IP{&publicIPv4},
 	}
-	newIpHolderInstance = linodego.Instance{
-		ID:     123456,
-		Label:  generateClusterScopedIPHolderLinodeName(zone, "linodelb"),
-		Type:   "g6-standard-1",
-		Region: "us-west",
-		IPv4:   []*net.IP{&publicIPv4},
-	}
+	newIpHolderInstance = linodego.Instance{}
 )
 
 func TestCiliumCCMLoadBalancers(t *testing.T) {
@@ -112,8 +106,12 @@ func TestCiliumCCMLoadBalancers(t *testing.T) {
 			f:    testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffix,
 		},
 		{
-			name: "Create Cilium Load Balancer With no existing IP holder nanode",
-			f:    testCreateWithNoExistingIPHolder,
+			name: "Create Cilium Load Balancer With no existing IP holder nanode and short suffix",
+			f:    testCreateWithNoExistingIPHolderUsingShortSuffix,
+		},
+		{
+			name: "Create Cilium Load Balancer With no existing IP holder nanode and no suffix",
+			f:    testCreateWithNoExistingIPHolderUsingNoSuffix,
 		},
 		{
 			name: "Create Cilium Load Balancer With no existing IP holder nanode and 63 char long suffix",
@@ -190,10 +188,21 @@ func addNodes(t *testing.T, kubeClient kubernetes.Interface, nodes []*v1.Node) {
 	}
 }
 
+func createNewIpHolderInstance() linodego.Instance {
+	return linodego.Instance{
+		ID:     123456,
+		Label:  generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix),
+		Type:   "g6-standard-1",
+		Region: "us-west",
+		IPv4:   []*net.IP{&publicIPv4},
+	}
+}
+
 func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) {
 	Options.BGPNodeSelector = ""
 	Options.IpHolderSuffix = "linodelb"
 	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
 
 	kubeClient, _ := k8sClient.NewFakeClientset()
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -258,6 +267,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) {
 func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) {
 	Options.BGPNodeSelector = "cilium-bgp-peering=true"
 	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
 
 	kubeClient, _ := k8sClient.NewFakeClientset()
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -297,6 +307,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T,
 	Options.BGPNodeSelector = "cilium-bgp-peering=true"
 	Options.IpHolderSuffix = "linodelb"
 	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
 
 	kubeClient, _ := k8sClient.NewFakeClientset()
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -336,6 +347,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi
 	Options.BGPNodeSelector = "cilium-bgp-peering=true"
 	Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp"
 	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
 
 	kubeClient, _ := k8sClient.NewFakeClientset()
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -371,10 +383,55 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi
 	}
 }
 
-func testCreateWithNoExistingIPHolder(t *testing.T, mc *mocks.MockClient) {
+func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockClient) {
+	Options.BGPNodeSelector = "cilium-bgp-peering=true"
+	Options.IpHolderSuffix = ""
+	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
+
+	kubeClient, _ := k8sClient.NewFakeClientset()
+	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
+	addService(t, kubeClient, svc)
+	addNodes(t, kubeClient, nodes)
+	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+
+	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
+	rawFilter, _ := json.Marshal(filter)
+	mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil)
+	filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)}
+	rawFilter, _ = json.Marshal(filter)
+	mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil)
+	dummySharedIP := "45.76.101.26"
+	mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil)
+	mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
+		IPv4: &linodego.InstanceIPv4Response{
+			Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
+		},
+	}, nil)
+	mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
+	mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
+		IPs:      []string{dummySharedIP},
+		LinodeID: 11111,
+	}).Times(1)
+	mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
+		IPs:      []string{dummySharedIP},
+		LinodeID: 22222,
+	}).Times(1)
+
+	lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
+	if err != nil {
+		t.Fatalf("expected a nil error, got %v", err)
+	}
+	if lbStatus == nil {
+		t.Fatal("expected non-nil lbStatus")
+	}
+}
+
+func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.MockClient) {
 	Options.BGPNodeSelector = "cilium-bgp-peering=true"
 	Options.IpHolderSuffix = "linodelb"
 	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
 
 	kubeClient, _ := k8sClient.NewFakeClientset()
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -418,6 +475,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc
 	Options.BGPNodeSelector = "cilium-bgp-peering=true"
 	Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp"
 	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
 
 	kubeClient, _ := k8sClient.NewFakeClientset()
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -487,6 +545,7 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi
 	Options.BGPNodeSelector = "cilium-bgp-peering=true"
 	Options.IpHolderSuffix = "linodelb"
 	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
 
 	kubeClient, _ := k8sClient.NewFakeClientset()
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -573,6 +632,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi
 	Options.BGPNodeSelector = "cilium-bgp-peering=true"
 	Options.IpHolderSuffix = "linodelb"
 	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
 
 	kubeClient, _ := k8sClient.NewFakeClientset()
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go
index 2998be3e..e9c80f3d 100644
--- a/cloud/linode/loadbalancers.go
+++ b/cloud/linode/loadbalancers.go
@@ -226,11 +226,6 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri
 	if Options.IpHolderSuffix != "" {
 		ipHolderSuffix = Options.IpHolderSuffix
 		klog.Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
-	} else {
-		if service.Namespace != "" {
-			ipHolderSuffix = service.Namespace
-			klog.Infof("using service-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
-		}
 	}
 
 	// CiliumLoadBalancerIPPool does not yet exist for the service
@@ -444,12 +439,8 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri
 	if Options.IpHolderSuffix != "" {
 		ipHolderSuffix = Options.IpHolderSuffix
 		klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
-	} else {
-		if service.Namespace != "" {
-			ipHolderSuffix = service.Namespace
-			klog.V(3).Infof("using service-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
-		}
 	}
+
 	// make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers
 	for _, node := range nodes {
 		if err := l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil {
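
For readers who want to see the naming rules this patch documents end to end, here is a minimal, self-contained sketch. safeConcatName, its hash fallback, and the constant values are assumptions that only approximate the repo's SafeConcatName helper; generateClusterScopedIPHolderLinodeName mirrors the patched logic above.

package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

const (
	dns1123LabelMaxLength = 63
	ipHolderLabelPrefix   = "linode-ccm-ip-holder"
)

// safeConcatName is a hypothetical stand-in for the repo's SafeConcatName:
// it joins the parts with hyphens and, when the result would exceed maxLen,
// truncates it and appends a short hash so the label stays unique and fits.
func safeConcatName(maxLen int, parts ...string) string {
	name := strings.Join(parts, "-")
	if len(name) <= maxLen {
		return name
	}
	hash := fmt.Sprintf("%x", sha256.Sum256([]byte(name)))
	return name[:maxLen-9] + "-" + hash[:8]
}

// generateClusterScopedIPHolderLinodeName mirrors the patched function: the
// suffix is appended only when it is non-empty, so an unset --ip-holder-suffix
// cannot leave a dangling or doubled hyphen in the generated label.
func generateClusterScopedIPHolderLinodeName(zone, suffix string) string {
	if suffix == "" {
		return safeConcatName(dns1123LabelMaxLength, ipHolderLabelPrefix, zone)
	}
	return safeConcatName(dns1123LabelMaxLength, ipHolderLabelPrefix, zone, suffix)
}

func main() {
	fmt.Println(generateClusterScopedIPHolderLinodeName("us-ord", ""))         // linode-ccm-ip-holder-us-ord
	fmt.Println(generateClusterScopedIPHolderLinodeName("us-ord", "linodelb")) // linode-ccm-ip-holder-us-ord-linodelb
	fmt.Println(generateClusterScopedIPHolderLinodeName("us-ord",
		"OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp")) // truncated, with a trailing hash
}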
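
The mock expectations above also trace the shared-IP sequence against the Linode API. The sketch below is a rough, hypothetical rendering of that flow, not the CCM's actual wiring: the linodego calls (AddInstanceIPAddress, ShareIPAddresses) match the ones the mocks expect, while ensureSharedIP and the instance IDs are illustrative.

package main

import (
	"context"
	"fmt"

	"github.com/linode/linodego"
)

// ensureSharedIP allocates an additional public IPv4 on the ip-holder nanode,
// then shares that address with each cluster node's Linode so Cilium BGP can
// announce it for Services of type LoadBalancer.
func ensureSharedIP(ctx context.Context, client linodego.Client, ipHolderID int, nodeIDs []int) (string, error) {
	// add an extra public IP to the ip-holder instance
	ip, err := client.AddInstanceIPAddress(ctx, ipHolderID, true)
	if err != nil {
		return "", err
	}
	// share that IP with every node participating in BGP peering
	for _, nodeID := range nodeIDs {
		if err := client.ShareIPAddresses(ctx, linodego.IPAddressesShareOptions{
			IPs:      []string{ip.Address},
			LinodeID: nodeID,
		}); err != nil {
			return "", err
		}
	}
	return ip.Address, nil
}

func main() {
	client := linodego.NewClient(nil) // placeholder client; real use needs an API token
	addr, err := ensureSharedIP(context.Background(), client, 123456, []int{11111, 22222})
	fmt.Println(addr, err)
}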