scaleway: terraform support (#15892)
* tasks dependencies

* FindAddresses should proceed even if LBID is empty

* refactor instance: handle several instances per IG + volumeSize in model

* refactor lb_backend: set servers' IPs in the backend task rather than the instance task

* dns changes for tf support

* render Scaleway s3 objects + add zone to outputs

* add terraform support to documentation

* new instance's unique name should be derived from actual instance names

* avoid spurious changes in tf + tag instance IPs to avoid orphaned IPs at cluster deletion

* update integration tests

* review comments

* add IPs to the resources to be deleted with the cluster

* add task + model for DNS record
Mia-Cross committed Sep 29, 2023
1 parent 672b61f commit e12f3dc
Showing 22 changed files with 931 additions and 243 deletions.
4 changes: 2 additions & 2 deletions cmd/kops/integration_test.go
@@ -1768,8 +1768,8 @@ func (i *integrationTest) runTestTerraformScaleway(t *testing.T) {
 		"aws_s3_object_"+i.clusterName+"-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content",
 		"aws_s3_object_"+i.clusterName+"-addons-limit-range.addons.k8s.io_content",
 		"aws_s3_object_"+i.clusterName+"-addons-networking.cilium.io-k8s-1.16_content",
-		"scaleway_instance_server_control-plane-fr-par-1_user_data",
-		"scaleway_instance_server_nodes-fr-par-1_user_data",
+		"scaleway_instance_server_control-plane-fr-par-1-0_user_data",
+		"scaleway_instance_server_nodes-fr-par-1-0_user_data",
 	)
 
 	i.runTest(t, ctx, h, expectedFilenames, "", "", nil)
134 changes: 77 additions & 57 deletions dnsprovider/pkg/dnsprovider/providers/scaleway/dns.go
@@ -344,7 +344,7 @@ func (r *resourceRecordChangeset) Apply(ctx context.Context) error {
 		return nil
 	}
 
-	updateRecordsRequest := []*domain.RecordChange(nil)
+	changeBatch := []*domain.RecordChange(nil)
 	klog.V(8).Infof("applying changes in record change set : [ %d additions | %d upserts | %d removals ]",
 		len(r.additions), len(r.upserts), len(r.removals))
 
@@ -353,75 +353,36 @@ func (r *resourceRecordChangeset) Apply(ctx context.Context) error {
 		return err
 	}

-	if len(r.additions) > 0 {
-		recordsToAdd := []*domain.Record(nil)
-		for _, rrset := range r.additions {
-			recordName := strings.TrimSuffix(rrset.Name(), ".")
-			recordName = strings.TrimSuffix(recordName, "."+r.zone.Name())
-			for _, rrdata := range rrset.Rrdatas() {
-				recordsToAdd = append(recordsToAdd, &domain.Record{
-					Name: recordName,
-					Data: rrdata,
-					TTL:  uint32(rrset.Ttl()),
-					Type: domain.RecordType(rrset.Type()),
-				})
-			}
-			klog.V(8).Infof("adding new DNS record %q to zone %q", recordName, r.zone.name)
-			updateRecordsRequest = append(updateRecordsRequest, &domain.RecordChange{
-				Add: &domain.RecordChangeAdd{
-					Records: recordsToAdd,
-				},
-			})
-		}
-	}
-
+	// Scaleway's Domain API doesn't allow more than one edit with the same record name in one request, which happens
+	// when there are several control-planes, so we have to check for duplicates in the upsert category and if there are,
+	// treat them as additions instead
 	if len(r.upserts) > 0 {
 		for _, rrset := range r.upserts {
-			for _, rrdata := range rrset.Rrdatas() {
-				for _, record := range records {
-					recordNameWithZone := fmt.Sprintf("%s.%s.", record.Name, r.zone.Name())
-					if recordNameWithZone == dns.EnsureDotSuffix(rrset.Name()) && rrset.Type() == rrstype.RrsType(record.Type) {
-						klog.V(8).Infof("changing DNS record %q of zone %q", record.Name, r.zone.Name())
-						updateRecordsRequest = append(updateRecordsRequest, &domain.RecordChange{
-							Set: &domain.RecordChangeSet{
-								ID: &record.ID,
-								Records: []*domain.Record{
-									{
-										Name: record.Name,
-										Data: rrdata,
-										TTL:  uint32(rrset.Ttl()),
-										Type: domain.RecordType(rrset.Type()),
-									},
-								},
-							},
-						})
-					}
-				}
+			for i, rrdata := range rrset.Rrdatas() {
+				if i == 0 {
+					changeBatch = putRecordToUpdateInChangeBatch(changeBatch, rrset, r.zone.Name(), records, rrdata)
+				} else {
+					rrsetFromIndex1 := r.rrsets.New(rrset.Name(), rrset.Rrdatas()[1:], rrset.Ttl(), rrset.Type())
+					changeBatch = putRecordToAddInChangeBatch(changeBatch, rrsetFromIndex1, r.zone.Name())
+					break
+				}
 			}
 		}
 	}
 
+	if len(r.additions) > 0 {
+		for _, rrset := range r.additions {
+			changeBatch = putRecordToAddInChangeBatch(changeBatch, rrset, r.zone.Name())
+		}
+	}
 	if len(r.removals) > 0 {
 		for _, rrset := range r.removals {
-			for _, record := range records {
-				recordNameWithZone := fmt.Sprintf("%s.%s.", record.Name, r.zone.Name())
-				if recordNameWithZone == dns.EnsureDotSuffix(rrset.Name()) && record.Data == rrset.Rrdatas()[0] &&
-					rrset.Type() == rrstype.RrsType(record.Type) {
-					klog.V(8).Infof("removing DNS record %q of zone %q", record.Name, r.zone.name)
-					updateRecordsRequest = append(updateRecordsRequest, &domain.RecordChange{
-						Delete: &domain.RecordChangeDelete{
-							ID: &record.ID,
-						},
-					})
-				}
-			}
+			changeBatch = putRecordToDeleteInChangeBatch(changeBatch, rrset, r.zone.Name(), records)
 		}
 	}
 
 	_, err = r.domainAPI.UpdateDNSZoneRecords(&domain.UpdateDNSZoneRecordsRequest{
 		DNSZone: r.zone.Name(),
-		Changes: updateRecordsRequest,
+		Changes: changeBatch,
 	}, scw.WithContext(ctx))
 	if err != nil {
 		return fmt.Errorf("failed to apply resource record set: %w", err)
@@ -456,3 +417,62 @@ func listRecords(api DomainAPI, zoneName string) ([]*domain.Record, error) {
 
 	return records.Records, err
 }

+func putRecordToAddInChangeBatch(changeBatch []*domain.RecordChange, rrset dnsprovider.ResourceRecordSet, zoneName string) []*domain.RecordChange {
+	recordsToAdd := []*domain.Record(nil)
+	recordName := strings.TrimSuffix(rrset.Name(), ".")
+	recordName = strings.TrimSuffix(recordName, "."+zoneName)
+	for _, rrdata := range rrset.Rrdatas() {
+		recordsToAdd = append(recordsToAdd, &domain.Record{
+			Name: recordName,
+			Data: rrdata,
+			TTL:  uint32(rrset.Ttl()),
+			Type: domain.RecordType(rrset.Type()),
+		})
+	}
+	klog.V(8).Infof("adding new DNS record %q to zone %q", recordName, zoneName)
+	return append(changeBatch, &domain.RecordChange{
+		Add: &domain.RecordChangeAdd{
+			Records: recordsToAdd,
+		},
+	})
+}
+
+func putRecordToUpdateInChangeBatch(changeBatch []*domain.RecordChange, rrset dnsprovider.ResourceRecordSet, zoneName string, records []*domain.Record, rrdata string) []*domain.RecordChange {
+	for _, record := range records {
+		recordNameWithZone := fmt.Sprintf("%s.%s.", record.Name, zoneName)
+		if recordNameWithZone == dns.EnsureDotSuffix(rrset.Name()) && rrset.Type() == rrstype.RrsType(record.Type) {
+			klog.V(8).Infof("changing DNS record %q of zone %q", record.Name, zoneName)
+			return append(changeBatch, &domain.RecordChange{
+				Set: &domain.RecordChangeSet{
+					ID: &record.ID,
+					Records: []*domain.Record{
+						{
+							Name: record.Name,
+							Data: rrdata,
+							TTL:  uint32(rrset.Ttl()),
+							Type: domain.RecordType(rrset.Type()),
+						},
+					},
+				},
+			})
+		}
+	}
+	return changeBatch
+}
+
+func putRecordToDeleteInChangeBatch(changeBatch []*domain.RecordChange, rrset dnsprovider.ResourceRecordSet, zoneName string, records []*domain.Record) []*domain.RecordChange {
+	for _, record := range records {
+		recordNameWithZone := fmt.Sprintf("%s.%s.", record.Name, zoneName)
+		if recordNameWithZone == dns.EnsureDotSuffix(rrset.Name()) && record.Data == rrset.Rrdatas()[0] &&
+			rrset.Type() == rrstype.RrsType(record.Type) {
+			klog.V(8).Infof("removing DNS record %q of zone %q", record.Name, zoneName)
+			return append(changeBatch, &domain.RecordChange{
+				Delete: &domain.RecordChangeDelete{
+					ID: &record.ID,
+				},
+			})
+		}
+	}
+	return changeBatch
+}
72 changes: 67 additions & 5 deletions docs/getting_started/scaleway.md
@@ -11,14 +11,10 @@
 * Instance size (also called commercial type)
 * Migrating from single to multi-master
 
-### Coming soon
-
-* [Terraform](https://github.com/scaleway/terraform-provider-scaleway) support
-* Private network
-
 ### Next features to implement
 
 * [Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/scaleway) support
+* Private network
 * BareMetal servers
 
 ## Requirements
@@ -111,3 +107,69 @@ kops update cluster mycluster.k8s.local --yes
```bash
kops delete cluster mycluster.k8s.local --yes
```

## Terraform support

kOps offers the possibility to generate a Terraform configuration corresponding to the cluster, instead of creating the cluster's resources directly.

You can find more information on the dedicated [kOps Terraform support](../terraform.md) page or in the [Scaleway Terraform provider's documentation](https://github.com/scaleway/terraform-provider-scaleway).

### For clusters without load-balancers

This concerns clusters using Scaleway DNS.

```bash
kops create cluster --cloud=scaleway --name=mycluster.mydomain.com --zones=fr-par-1 --target=terraform --out=$OUTPUT_DIR
cd $OUTPUT_DIR
terraform init
terraform apply
```
kOps generates a `kubernetes.tf` file in the output directory of your choice; you then only have to initialize Terraform and apply the configuration.
NB: keep in mind that every new call to kOps with the flags `--target=terraform --out=$OUTPUT_DIR` will overwrite `kubernetes.tf`, so any changes that you made to it will be lost.
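
To get a quick overview of what was rendered, you can count the resources by type; the names follow the patterns visible in the integration test above (`scaleway_instance_server_*` for instances, `aws_s3_object_*` for state-store objects). A minimal sketch, assuming you are in `$OUTPUT_DIR`:

```bash
# List the resource types rendered in kubernetes.tf, with a count for each
grep 'resource "' kubernetes.tf | awk '{print $2}' | sort | uniq -c
```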

### For clusters with load-balancers

This concerns clusters using either no DNS or gossip DNS. For these cluster types a small trick is needed: kOps doesn't know the load-balancer's IP at the time it writes the instances' cloud-init configuration, so we have to run an update followed by a rolling update.

#### Creating a valid cluster

```bash
kops create cluster --cloud=scaleway --name=my.cluster --zones=fr-par-1 --target=terraform --out=$OUTPUT_DIR
cd $OUTPUT_DIR
terraform init
terraform apply
# Now that the load-balancer is up, we update the cluster to integrate its IP into the instances' configuration
kops update cluster my.cluster --target=terraform --out=$OUTPUT_DIR
# Then we replace the instances so that they reboot with the new configuration (the --cloudonly flag is needed because the cluster cannot be validated at this point)
kops rolling-update cluster my.cluster --cloudonly --yes
```
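
Once the instances are back up with the load-balancer's IP baked into their configuration, the cluster should become reachable. A sketch of the follow-up check (the 10-minute timeout is an arbitrary choice):

```bash
# Export admin credentials, then wait for the cluster to pass validation
kops export kubeconfig my.cluster --admin
kops validate cluster --wait 10m
```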

#### Keeping the Terraform state consistent after a rolling-update

Now that the instances have been replaced by the rolling update, the cluster can be validated.
However, since resources have changed outside of Terraform, the state is stale. If you need to keep the state consistent with the actual cluster, you should import the new instances into it. This can be achieved with the following script:

```bash
# First we need to retrieve the names of the instances
cd "$OUTPUT_DIR" || exit
TF_SERVERS=($(grep 'resource "scaleway_instance_server"' < kubernetes.tf | awk '{print $3}' | cut -d'"' -f 2))
# Then we get the zone for the import
ZONE=$(terraform output zone | cut -d '"' -f2)
# And for each instance:
for SERVER in "${TF_SERVERS[@]}"; do
  # We remove the stale instance from the state
  terraform state rm scaleway_instance_server.$SERVER
  # We fetch its new ID
  NEW_SERVER_ID=$(scw instance server list zone=$ZONE name=$SERVER -o template="{{ .ID }}")
  if [ "$NEW_SERVER_ID" == "" ]; then
    echo "could not find new ID of the server $SERVER"
    continue
  fi
  # We import the new instance in the state
  terraform import scaleway_instance_server.$SERVER $ZONE/$NEW_SERVER_ID
done
```
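
Once every instance has been re-imported, a quick sanity check is to run a plan: if the state matches the cluster again, Terraform should report that no changes are needed (assuming nothing else was modified out of band):

```bash
# The plan should be empty if the imports brought the state back in sync
terraform plan
```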

NB: for the script to run, you will need the [Scaleway CLI](https://github.com/scaleway/scaleway-cli) installed. You can also fetch the IDs of the new instances manually in the [Scaleway Console](https://console.scaleway.com), but if you have many instances this may not be practical.
If you need help with the CLI, these resources might help:
* [Installing the CLI](https://github.com/scaleway/scaleway-cli#readme)
* [Tutorial for setting up the CLI and managing instances with it](https://www.scaleway.com/en/docs/compute/instances/api-cli/creating-managing-instances-with-cliv2/)
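
As a minimal sketch of the CLI setup the script above assumes (the `fr-par-1` zone is just the one used throughout these examples):

```bash
# One-time interactive setup: API keys, default organization and zone
scw init
# Sanity check: the CLI can list the cluster's instances
scw instance server list zone=fr-par-1
```
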
3 changes: 2 additions & 1 deletion pkg/model/scalewaymodel/api_loadbalancer.go
@@ -72,6 +72,7 @@ func (b *APILoadBalancerModelBuilder) Build(c *fi.CloudupModelBuilderContext) er
 	loadBalancer := &scalewaytasks.LoadBalancer{
 		Name:        fi.PtrTo(loadBalancerName),
 		Zone:        fi.PtrTo(string(zone)),
+		Type:        scalewaytasks.LbDefaultType,
 		Lifecycle:   b.Lifecycle,
 		Tags:        lbTags,
 		Description: "Load-balancer for kops cluster " + b.ClusterName(),
@@ -109,7 +110,7 @@ func createLbBackendAndFrontend(name string, port int, zone scw.Zone, loadBalanc
 		ForwardPort:          fi.PtrTo(int32(port)),
 		ForwardPortAlgorithm: fi.PtrTo(string(lb.ForwardPortAlgorithmRoundrobin)),
 		StickySessions:       fi.PtrTo(string(lb.StickySessionsTypeNone)),
-		ProxyProtocol:        fi.PtrTo(string(lb.ProxyProtocolProxyProtocolUnknown)),
+		ProxyProtocol:        fi.PtrTo(string(lb.ProxyProtocolProxyProtocolNone)),
 		LoadBalancer:         loadBalancer,
 	}
 
84 changes: 84 additions & 0 deletions pkg/model/scalewaymodel/dns.go
@@ -0,0 +1,84 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scalewaymodel

import (
	"strings"

	domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup/scalewaytasks"
)

const (
	placeholderIP                      = "203.0.113.123"
	kopsControllerInternalRecordPrefix = "kops-controller.internal."
	defaultTTL                         = uint32(60)
)

type DNSModelBuilder struct {
	*ScwModelContext
	Lifecycle fi.Lifecycle
}

var _ fi.CloudupModelBuilder = &DNSModelBuilder{}

func (b *DNSModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
	if !b.Cluster.PublishesDNSRecords() {
		return nil
	}

	if !b.UseLoadBalancerForAPI() {
		recordShortName := strings.TrimSuffix(b.Cluster.Spec.API.PublicName, "."+b.Cluster.Spec.DNSZone)
		dnsAPIExternal := &scalewaytasks.DNSRecord{
			Name:      fi.PtrTo(recordShortName),
			Data:      fi.PtrTo(placeholderIP),
			DNSZone:   fi.PtrTo(b.Cluster.Spec.DNSZone),
			Type:      fi.PtrTo(domain.RecordTypeA.String()),
			TTL:       fi.PtrTo(defaultTTL),
			Lifecycle: b.Lifecycle,
		}
		c.AddTask(dnsAPIExternal)
	}

	if !b.UseLoadBalancerForInternalAPI() {
		recordShortName := strings.TrimSuffix(b.Cluster.APIInternalName(), "."+b.Cluster.Spec.DNSZone)
		dnsAPIInternal := &scalewaytasks.DNSRecord{
			Name:      fi.PtrTo(recordShortName),
			Data:      fi.PtrTo(placeholderIP),
			DNSZone:   fi.PtrTo(b.Cluster.Spec.DNSZone),
			Type:      fi.PtrTo(domain.RecordTypeA.String()),
			TTL:       fi.PtrTo(defaultTTL),
			Lifecycle: b.Lifecycle,
		}
		c.AddTask(dnsAPIInternal)
	}

	recordSuffix := strings.TrimSuffix(b.Cluster.ObjectMeta.Name, "."+b.Cluster.Spec.DNSZone)
	recordShortName := kopsControllerInternalRecordPrefix + recordSuffix
	kopsControllerInternal := &scalewaytasks.DNSRecord{
		Name:      fi.PtrTo(recordShortName),
		Data:      fi.PtrTo(placeholderIP),
		DNSZone:   fi.PtrTo(b.Cluster.Spec.DNSZone),
		Type:      fi.PtrTo(domain.RecordTypeA.String()),
		TTL:       fi.PtrTo(defaultTTL),
		Lifecycle: b.Lifecycle,
	}
	c.AddTask(kopsControllerInternal)

	return nil
}