Using Terraform

In a new folder, we create the following Terraform configuration files, which contain all the resources needed to create an SKS cluster and its related components:

  • provider.tf specifies the version of the Exoscale provider
  • security_group.tf specifies the security group and the rules to be applied to the cluster’s nodes
  • cluster.tf defines the cluster’s configuration
  • node_pool.tf defines the group of nodes associated with the cluster
  • kubeconfig.tf is used to create a local kubeconfig file to access the cluster
  • variables.tf defines the input information
  • output.tf specifies the information to be displayed back
# Pin the Exoscale provider to the 0.60.x series so a future major release
# cannot silently change resource behavior.
terraform {
  required_providers {
    exoscale = {
      source  = "exoscale/exoscale"
      version = "~> 0.60.0"
    }
  }
}

# No arguments needed: credentials are read from the EXOSCALE_API_KEY and
# EXOSCALE_API_SECRET environment variables (exported before `terraform init`).
provider "exoscale" {}
# A security group so the nodes can communicate and we can pull logs
resource "exoscale_security_group" "sg_sks_nodes" {
    name        = "sg_sks_nodes-${var.name}"
    description = "Allows traffic between sks nodes and public pulling of logs"
}

# Allow inbound TCP 10250 (kubelet API) so the control plane can pull logs
# and exec into pods.
# NOTE(review): the CIDR 0.0.0.0/0 exposes the kubelet port to the whole
# Internet — consider narrowing it if your environment allows.
resource "exoscale_security_group_rule" "sg_sks_nodes_logs_rule" {
  security_group_id = exoscale_security_group.sg_sks_nodes.id
  type              = "INGRESS"
  protocol          = "TCP"
  start_port        = 10250
  end_port          = 10250
  cidr              = "0.0.0.0/0"
}

# Node-to-node overlay traffic for the Calico CNI (UDP 4789, the standard
# VXLAN port). The rule references the security group itself, so only
# members of sg_sks_nodes may reach each other on this port.
resource "exoscale_security_group_rule" "sg_sks_nodes_calico" {
  security_group_id      = exoscale_security_group.sg_sks_nodes.id
  user_security_group_id = exoscale_security_group.sg_sks_nodes.id

  type       = "INGRESS"
  protocol   = "UDP"
  start_port = 4789
  end_port   = 4789
}

# Open the Kubernetes NodePort range (30000-32767) so services published
# through the Exoscale Cloud Controller Manager are reachable.
resource "exoscale_security_group_rule" "sg_sks_nodes_ccm" {
  security_group_id = exoscale_security_group.sg_sks_nodes.id
  type              = "INGRESS"
  protocol          = "TCP"
  cidr              = "0.0.0.0/0"
  start_port        = 30000
  end_port          = 32767
}
# The SKS control plane: Calico as CNI, with the Exoscale Cloud Controller
# Manager, the Exoscale CSI driver and the metrics-server add-ons enabled.
resource "exoscale_sks_cluster" "sks" {
  zone        = var.zone
  name        = var.name
  description = "Demo cluster ${var.name} / ${var.zone}"

  version       = var.kube_version
  service_level = "starter"
  cni           = "calico"

  # Built-in add-ons.
  exoscale_ccm   = true
  exoscale_csi   = true
  metrics_server = true
}
# Worker node pool attached to the cluster; size and instance type are
# driven by the input variables.
resource "exoscale_sks_nodepool" "workers" {
  zone       = var.zone
  cluster_id = exoscale_sks_cluster.sks.id
  name       = "workers-${var.name}"

  instance_type = var.worker_type
  size          = var.workers_number

  security_group_ids = [exoscale_security_group.sg_sks_nodes.id]
}
# Request an admin kubeconfig for the cluster from the SKS API.
resource "exoscale_sks_kubeconfig" "sks_kubeconfig" {
  cluster_id = exoscale_sks_cluster.sks.id
  zone       = exoscale_sks_cluster.sks.zone
  user       = "kubernetes-admin"
  groups     = ["system:masters"]
}

# Persist the kubeconfig to disk, readable by the owner only (0600),
# so kubectl can be pointed at it.
resource "local_sensitive_file" "sks_kubeconfig_file" {
  filename        = "kubeconfig"
  file_permission = "0600"
  content         = exoscale_sks_kubeconfig.sks_kubeconfig.kubeconfig
}

# Path of the generated kubeconfig file, relative to the working directory.
output "sks_kubeconfig" {
  description = "Path of the generated kubeconfig file"
  value       = local_sensitive_file.sks_kubeconfig_file.filename
}
# Kubernetes version for the control plane and node pool.
# Fixed: the description used to mention 1.21.0 while the default is 1.31.0.
variable "kube_version" {
  description = "Kubernetes version to deploy (e.g. 1.31.0)"
  type        = string
  default     = "1.31.0"
}

# Base name reused for the cluster, the node pool and the security group.
variable "name" {
  description = "Name of the cluster"
  type        = string
  default     = "demo"
}

# Size of the worker node pool.
variable "workers_number" {
  description = "Number of workers in node pool"
  type        = number
  default     = 3
}

# Instance type of the worker nodes; added the description the other
# variables already have.
variable "worker_type" {
  description = "Exoscale instance type used for the worker nodes"
  type        = string
  default     = "standard.medium"
}

# Target zone; added the description the other variables already have.
variable "zone" {
  description = "Exoscale zone in which to create the cluster"
  type        = string
  default     = "ch-gva-2"
}
# Echo the cluster name back to the operator.
output "name" {
  description = "Name of the created cluster"
  value       = var.name
}

# Echo the zone back to the operator.
output "zone" {
  description = "Zone of the created cluster"
  value       = var.zone
}

Next, we need to set environment variables so that Terraform can use the Exoscale API.

export EXOSCALE_API_KEY=...
export EXOSCALE_API_SECRET=...

Next, we initialize Terraform so it fetches the correct version of the provider.

terraform init

We verify that everything is correctly configured by simulating the creation.

terraform plan

Then, if no error is raised, we create the cluster and the related resources.

terraform apply

A file named “kubeconfig” is created in the current folder. We can configure our local kubectl binary and access the cluster.

export KUBECONFIG=$PWD/kubeconfig
$ kubectl get nodes
NAME               STATUS   ROLES    AGE   VERSION
pool-92e0f-fqjys   Ready    <none>   2m    v1.31.0
pool-92e0f-ofcko   Ready    <none>   2m    v1.31.0
pool-92e0f-tygzv   Ready    <none>   2m    v1.31.0
⚠️

Once you are done using the cluster, don’t forget to delete it, and the associated resources, with the following command:

terraform destroy