Skip to content

Commit

Permalink
revert service ns suffix fallback, update docs
Browse files Browse the repository at this point in the history
Signed-off-by: Ross Kirkpatrick <[email protected]>
  • Loading branch information
rosskirkpat committed Nov 19, 2024
1 parent 8d0ea9f commit 9facb9c
Show file tree
Hide file tree
Showing 4 changed files with 85 additions and 32 deletions.
6 changes: 5 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,11 @@ These configuration options can be specified via the `port-*` annotation, encode
#### Shared IP Load-Balancing
**NOTE:** This feature requires contacting [Customer Support](https://www.linode.com/support/contact/) to enable provisioning additional IPs.

Services of `type: LoadBalancer` can receive an external IP not backed by a NodeBalancer if `--bgp-node-selector` is set on the Linode CCM and `--load-balancer-type` is set to `cilium-bgp`. Additionally, if you plan to run multiple clusters within a single API Region, setting `--ip-holder-suffix` on the Linode CCM to a unique value per cluster will create an ip-holder nanode for every cluster created within that API Region.
Services of `type: LoadBalancer` can receive an external IP not backed by a NodeBalancer if `--bgp-node-selector` is set on the Linode CCM and `--load-balancer-type` is set to `cilium-bgp`.

If you plan to run multiple clusters within a single API Region, set `--ip-holder-suffix` on the Linode CCM to a unique value per cluster. This creates an ip-holder nanode for each cluster created within that API Region, named `linode-ccm-ip-holder-<region>-<ip-holder-suffix>`.

If `--ip-holder-suffix` is not set on the Linode CCM, the ip-holder nanode falls back to the default naming convention `linode-ccm-ip-holder-<region>`.

This feature requires the Kubernetes cluster to be using [Cilium](https://cilium.io/) as the CNI with the `bgp-control-plane` feature enabled.

Expand Down
20 changes: 9 additions & 11 deletions cloud/linode/cilium_loadbalancers.go
Original file line number Diff line number Diff line change
Expand Up @@ -282,11 +282,6 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service)
if Options.IpHolderSuffix != "" {
ipHolderSuffix = Options.IpHolderSuffix
klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
} else {
if service.Namespace != "" {
ipHolderSuffix = service.Namespace
klog.V(3).Infof("using service-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
}
}

ipHolder, err := l.getIPHolder(ctx, ipHolderSuffix)
Expand Down Expand Up @@ -391,18 +386,21 @@ func (l *loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linode

// generateClusterScopedIPHolderLinodeName attempts to generate a unique name for the IP Holder
// instance used alongside Cilium LoadBalancers and Shared IPs for Kubernetes Services.
// The `suffix` is set to either value of the `--ip-holder-suffix` parameter, if it is set.
// The backup method involves keying off the service namespace.
// While it _is_ possible to have an empty suffix passed in, it would mean that kubernetes
// allowed the creation of a service without a namespace, which is highly improbable.
// If the `--ip-holder-suffix` arg is passed when running Linode CCM, `suffix` is set to that value.
// N.B. This function uses SafeConcatName to ensure that the label is <= 63 characters.
// This may result in a hash of the suffix value being used instead of the actual suffix value
// that was passed to this function if the length of the suffix exceeds a certain character limit.
func generateClusterScopedIPHolderLinodeName(zone, suffix string) string {
func generateClusterScopedIPHolderLinodeName(zone, suffix string) (label string) {
// since Linode CCM consumers are varied, we require a method of providing a
// suffix that does not rely on the use of a specific product (ex. LKE) to
// have a specific piece of metadata (ex. annotation(s), label(s) ) present to key off of.
label := SafeConcatName(DNS1123LabelMaxLength, ipHolderLabelPrefix, zone, suffix)

if suffix == "" {
// this avoids a double hyphen if suffix is empty (ex. linode-ccm-ip-holder--us-ord)
label = SafeConcatName(DNS1123LabelMaxLength, ipHolderLabelPrefix, zone)
} else {
label = SafeConcatName(DNS1123LabelMaxLength, ipHolderLabelPrefix, zone, suffix)
}
klog.V(5).Infof("generated IP Holder Linode label: %s", label)
return label
}
Expand Down
80 changes: 70 additions & 10 deletions cloud/linode/cilium_loadbalancers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,13 +77,7 @@ var (
Region: "us-west",
IPv4: []*net.IP{&publicIPv4},
}
newIpHolderInstance = linodego.Instance{
ID: 123456,
Label: generateClusterScopedIPHolderLinodeName(zone, "linodelb"),
Type: "g6-standard-1",
Region: "us-west",
IPv4: []*net.IP{&publicIPv4},
}
newIpHolderInstance = linodego.Instance{}
)

func TestCiliumCCMLoadBalancers(t *testing.T) {
Expand Down Expand Up @@ -112,8 +106,12 @@ func TestCiliumCCMLoadBalancers(t *testing.T) {
f: testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffix,
},
{
name: "Create Cilium Load Balancer With no existing IP holder nanode",
f: testCreateWithNoExistingIPHolder,
name: "Create Cilium Load Balancer With no existing IP holder nanode and short suffix",
f: testCreateWithNoExistingIPHolderUsingShortSuffix,
},
{
name: "Create Cilium Load Balancer With no existing IP holder nanode and no suffix",
f: testCreateWithNoExistingIPHolderUsingNoSuffix,
},
{
name: "Create Cilium Load Balancer With no existing IP holder nanode and 63 char long suffix",
Expand Down Expand Up @@ -190,10 +188,21 @@ func addNodes(t *testing.T, kubeClient kubernetes.Interface, nodes []*v1.Node) {
}
}

// createNewIpHolderInstance builds the IP Holder fixture Instance used by the
// tests. Its label is derived from the package-level zone and the currently
// configured Options.IpHolderSuffix, so callers must set Options first.
func createNewIpHolderInstance() linodego.Instance {
	holder := linodego.Instance{
		ID:     123456,
		Type:   "g6-standard-1",
		Region: "us-west",
		IPv4:   []*net.IP{&publicIPv4},
	}
	holder.Label = generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)
	return holder
}

func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = ""
Options.IpHolderSuffix = "linodelb"
svc := createTestService()
newIpHolderInstance = createNewIpHolderInstance()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
Expand Down Expand Up @@ -258,6 +267,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) {
func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
svc := createTestService()
newIpHolderInstance = createNewIpHolderInstance()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
Expand Down Expand Up @@ -297,6 +307,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T,
Options.BGPNodeSelector = "cilium-bgp-peering=true"
Options.IpHolderSuffix = "linodelb"
svc := createTestService()
newIpHolderInstance = createNewIpHolderInstance()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
Expand Down Expand Up @@ -336,6 +347,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi
Options.BGPNodeSelector = "cilium-bgp-peering=true"
Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp"
svc := createTestService()
newIpHolderInstance = createNewIpHolderInstance()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
Expand Down Expand Up @@ -371,10 +383,55 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi
}
}

func testCreateWithNoExistingIPHolder(t *testing.T, mc *mocks.MockClient) {
func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
Options.IpHolderSuffix = ""
svc := createTestService()
newIpHolderInstance = createNewIpHolderInstance()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
addService(t, kubeClient, svc)
addNodes(t, kubeClient, nodes)
lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}

filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
rawFilter, _ := json.Marshal(filter)
mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil)
filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)}
rawFilter, _ = json.Marshal(filter)
mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil)
dummySharedIP := "45.76.101.26"
mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil)
mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
IPv4: &linodego.InstanceIPv4Response{
Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
},
}, nil)
mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
IPs: []string{dummySharedIP},
LinodeID: 11111,
}).Times(1)
mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
IPs: []string{dummySharedIP},
LinodeID: 22222,
}).Times(1)

lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
if err != nil {
t.Fatalf("expected a nil error, got %v", err)
}
if lbStatus == nil {
t.Fatal("expected non-nil lbStatus")
}
}

func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
Options.IpHolderSuffix = "linodelb"
svc := createTestService()
newIpHolderInstance = createNewIpHolderInstance()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
Expand Down Expand Up @@ -418,6 +475,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc
Options.BGPNodeSelector = "cilium-bgp-peering=true"
Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp"
svc := createTestService()
newIpHolderInstance = createNewIpHolderInstance()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
Expand Down Expand Up @@ -487,6 +545,7 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi
Options.BGPNodeSelector = "cilium-bgp-peering=true"
Options.IpHolderSuffix = "linodelb"
svc := createTestService()
newIpHolderInstance = createNewIpHolderInstance()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
Expand Down Expand Up @@ -573,6 +632,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi
Options.BGPNodeSelector = "cilium-bgp-peering=true"
Options.IpHolderSuffix = "linodelb"
svc := createTestService()
newIpHolderInstance = createNewIpHolderInstance()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
Expand Down
11 changes: 1 addition & 10 deletions cloud/linode/loadbalancers.go
Original file line number Diff line number Diff line change
Expand Up @@ -226,11 +226,6 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri
if Options.IpHolderSuffix != "" {
ipHolderSuffix = Options.IpHolderSuffix
klog.Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
} else {
if service.Namespace != "" {
ipHolderSuffix = service.Namespace
klog.Infof("using service-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
}
}

// CiliumLoadBalancerIPPool does not yet exist for the service
Expand Down Expand Up @@ -444,12 +439,8 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri
if Options.IpHolderSuffix != "" {
ipHolderSuffix = Options.IpHolderSuffix
klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
} else {
if service.Namespace != "" {
ipHolderSuffix = service.Namespace
klog.V(3).Infof("using service-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
}
}

// make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers
for _, node := range nodes {
if err := l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil {
Expand Down

0 comments on commit 9facb9c

Please sign in to comment.