diff --git a/.github/workflows/terraform-examples.yml b/.github/workflows/terraform-examples.yml index 25f812e..b9ab353 100644 --- a/.github/workflows/terraform-examples.yml +++ b/.github/workflows/terraform-examples.yml @@ -9,7 +9,7 @@ on: jobs: - terraform-tflint: + terraform-check-tflint: runs-on: ubuntu-latest steps: - name: Check out code diff --git a/Makefile b/Makefile index 5273da4..669f282 100644 --- a/Makefile +++ b/Makefile @@ -1,14 +1,18 @@ default: help -.PHONY: terraform-tflint -terraform-tflint: ## Run 'terraform-tflint' github actions with https://github.com/nektos/act - act -j terraform-tflint +.PHONY: terraform-check-tflint +terraform-check-tflint: ## Run 'terraform-check-tflint' github actions with https://github.com/nektos/act + act -j terraform-check-tflint .PHONY: check-terraform-examples terraform-check-examples: ## Run specific 'check' github actions with https://github.com/nektos/act act -j terraform-check-fmt act -j terraform-check-variables-tailscale-install-scripts +.PHONY: terraform-fmt +terraform-fmt: ## Run 'terraform-fmt' github actions with https://github.com/nektos/act + terraform fmt -recursive + .PHONY: help help: ## Display this information. Default target. 
@echo "Valid targets:" diff --git a/terraform/aws/aws-eks-operator/README.md b/terraform/aws/aws-eks-operator/README.md new file mode 100644 index 0000000..c4480b8 --- /dev/null +++ b/terraform/aws/aws-eks-operator/README.md @@ -0,0 +1,84 @@ +# aws-eks-operator + +This example creates the following: + +- a VPC and related resources including a NAT Gateway +- an EKS cluster with a managed node group +- a Kubernetes namespace for the [Tailscale operator](https://tailscale.com/kb/1236/kubernetes-operator) +- the Tailscale Kubernetes Operator deployed via [Helm](https://tailscale.com/kb/1236/kubernetes-operator#helm) + +## Considerations + +- The EKS cluster is configured with both public and private API server access for flexibility +- The Tailscale operator is deployed in a dedicated `tailscale` namespace +- The operator will create a Tailscale device for API server proxy access +- Any additional Tailscale resources (like ingress controllers) created by the operator will appear in your Tailnet + +## Prerequisites + +- Create a [Tailscale OAuth Client](https://tailscale.com/kb/1215/oauth-clients#setting-up-an-oauth-client) with appropriate scopes +- Ensure you have AWS CLI configured with appropriate permissions for EKS +- Install `kubectl` for cluster access after deployment + +## To use + +Follow the documentation to configure the Terraform providers: + +- [AWS](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) +- [Kubernetes](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs) +- [Helm](https://registry.terraform.io/providers/hashicorp/helm/latest/docs) + +### Configure variables + +Create a `terraform.tfvars` file with your Tailscale OAuth credentials: + +```hcl +tailscale_oauth_client_id = "your-oauth-client-id" +tailscale_oauth_client_secret = "your-oauth-client-secret" +``` + +### Deploy + +```shell +terraform init +terraform apply +``` + +#### Verify deployment + +After deployment, configure kubectl to access your 
cluster: + +```shell +aws eks update-kubeconfig --region $AWS_REGION --name $(terraform output -raw cluster_name) +``` + +Check that the Tailscale operator is running: + +```shell +kubectl get pods -n tailscale +kubectl logs -n tailscale -l app.kubernetes.io/name=tailscale-operator +``` + +#### Verify connectivity via the [API server proxy](https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy) + +After deployment, configure kubectl to access your cluster using Tailscale: + +```shell +tailscale configure kubeconfig $(terraform output -raw operator_name) +``` + +```shell +kubectl get pods -n tailscale +``` + +## To destroy + +```shell +terraform destroy + +# remove leftover Tailscale devices at https://login.tailscale.com/admin/machines +``` + +## Limitations + +- The [HA API server proxy](https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy#configuring-a-high-availability-api-server-proxy) is deployed using a [terraform null_resource](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) instead of [kubernetes_manifest](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest.html) due to a Terraform limitation that results in `cannot create REST client: no client config` errors on first run. 
diff --git a/terraform/aws/aws-eks-operator/data.tf b/terraform/aws/aws-eks-operator/data.tf new file mode 100644 index 0000000..2502393 --- /dev/null +++ b/terraform/aws/aws-eks-operator/data.tf @@ -0,0 +1 @@ +data "aws_region" "current" {} diff --git a/terraform/aws/aws-eks-operator/main.tf b/terraform/aws/aws-eks-operator/main.tf new file mode 100644 index 0000000..23af13b --- /dev/null +++ b/terraform/aws/aws-eks-operator/main.tf @@ -0,0 +1,171 @@ +locals { + name = "example-${basename(path.cwd)}" + + aws_tags = { + Name = local.name + } + + # Modify these to use your own VPC + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + + # EKS cluster configuration + cluster_version = "1.34" # TODO: omit this? + node_instance_type = "t3.medium" + desired_size = 2 + max_size = 2 + min_size = 1 + + # Tailscale Operator configuration + namespace_name = "tailscale" + operator_name = local.name + operator_version = "1.92.4" + tailscale_oauth_client_id = var.tailscale_oauth_client_id + tailscale_oauth_client_secret = var.tailscale_oauth_client_secret +} + +# Remove this to use your own VPC. +module "vpc" { + source = "../internal-modules/aws-vpc" + + name = local.name + tags = local.aws_tags +} + +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = ">= 21.0, < 22.0" + + name = local.name + kubernetes_version = local.cluster_version + + addons = { + coredns = {} + eks-pod-identity-agent = { + before_compute = true + } + kube-proxy = {} + vpc-cni = { + before_compute = true + } + } + + # Once the Tailscale operator is installed, `endpoint_public_access` can be disabled. + # This is left enabled for the sake of easy adoption. 
+ endpoint_public_access = true + + # Optional: Adds the current caller identity as an administrator via cluster access entry + enable_cluster_creator_admin_permissions = true + + vpc_id = local.vpc_id + subnet_ids = local.subnet_ids + + eks_managed_node_groups = { + main = { + name = local.name + instance_types = [local.node_instance_type] + + desired_size = local.desired_size + max_size = local.max_size + min_size = local.min_size + } + } + + tags = local.aws_tags +} + +resource "kubernetes_namespace_v1" "tailscale_operator" { + provider = kubernetes.this + + metadata { + name = local.namespace_name + labels = { + "pod-security.kubernetes.io/enforce" = "privileged" + } + } +} + +# +# https://tailscale.com/kb/1236/kubernetes-operator#helm +# +resource "helm_release" "tailscale_operator" { + provider = helm.this + + name = local.operator_name + namespace = kubernetes_namespace_v1.tailscale_operator.metadata[0].name + + repository = "https://pkgs.tailscale.com/helmcharts" + chart = "tailscale-operator" + version = local.operator_version + + values = [ + yamlencode({ + operatorConfig = { + image = { + repo = "tailscale/k8s-operator" + tag = "v${local.operator_version}" + } + hostname = local.operator_name + } + apiServerProxyConfig = { + mode = true + tags = "tag:k8s-operator,tag:k8s-api-server" + } + }) + ] + + set_sensitive = [ + { + name = "oauth.clientId" + value = local.tailscale_oauth_client_id + }, + { + name = "oauth.clientSecret" + value = local.tailscale_oauth_client_secret + }, + ] + + depends_on = [ + module.eks, + ] +} + +# +# https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy#configuring-a-high-availability-api-server-proxy +# +resource "null_resource" "kubectl_ha_proxy" { + count = 1 # Change to 0 to destroy. Commenting or removing the resource will not run the destroy provisioners. 
+ triggers = { + region = data.aws_region.current.region + cluster_arn = module.eks.cluster_arn + cluster_name = module.eks.cluster_name + operator_name = helm_release.tailscale_operator.name + } + + # + # Create provisioners + # + provisioner "local-exec" { + command = "aws eks update-kubeconfig --region ${self.triggers.region} --name ${self.triggers.cluster_name}" + } + provisioner "local-exec" { + command = "OPERATOR_NAME=${self.triggers.operator_name} envsubst < ${path.module}/tailscale-api-server-ha-proxy.yaml | kubectl apply --context=${self.triggers.cluster_arn} -f -" + } + + # + # Destroy provisioners + # + provisioner "local-exec" { + when = destroy + command = "aws eks update-kubeconfig --region ${self.triggers.region} --name ${self.triggers.cluster_name}" + } + provisioner "local-exec" { + when = destroy + command = "OPERATOR_NAME=${self.triggers.operator_name} envsubst < ${path.module}/tailscale-api-server-ha-proxy.yaml | kubectl delete --context=${self.triggers.cluster_arn} -f -" + } + + depends_on = [ + module.vpc, # prevent network changes before this finishes during a destroy + helm_release.tailscale_operator, + ] +} diff --git a/terraform/aws/aws-eks-operator/outputs.tf b/terraform/aws/aws-eks-operator/outputs.tf new file mode 100644 index 0000000..3a58275 --- /dev/null +++ b/terraform/aws/aws-eks-operator/outputs.tf @@ -0,0 +1,39 @@ +output "vpc_id" { + description = "VPC ID where the EKS cluster is deployed" + value = module.vpc.vpc_id +} + +output "cluster_name" { + description = "EKS cluster name" + value = module.eks.cluster_name +} + +output "operator_namespace" { + description = "Kubernetes namespace where Tailscale operator is deployed" + value = kubernetes_namespace_v1.tailscale_operator.metadata[0].name +} + +output "operator_name" { + description = "Configured name of the Tailscale operator" + value = helm_release.tailscale_operator.name +} + +output "cmd_kubeconfig_tailscale" { + description = "Command to configure kubeconfig for 
Tailscale access to the EKS cluster" + value = "tailscale configure kubeconfig ${helm_release.tailscale_operator.name}" +} + +output "cmd_kubeconfig_aws" { + description = "Command to configure kubeconfig for public access to the EKS cluster" + value = "aws eks update-kubeconfig --region ${data.aws_region.current.region} --name ${module.eks.cluster_name}" +} + +output "cmd_kubectl_ha_proxy_apply" { + description = "Command to deploy the Tailscale high availability API server proxy - https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy#configuring-a-high-availability-api-server-proxy" + value = "OPERATOR_NAME=${helm_release.tailscale_operator.name} envsubst < tailscale-api-server-ha-proxy.yaml | kubectl apply -f -" +} + +output "cmd_kubectl_ha_proxy_delete" { + description = "Command to delete the Tailscale high availability API server proxy - https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy#configuring-a-high-availability-api-server-proxy" + value = "OPERATOR_NAME=${helm_release.tailscale_operator.name} envsubst < tailscale-api-server-ha-proxy.yaml | kubectl delete -f -" +} diff --git a/terraform/aws/aws-eks-operator/tailscale-api-server-ha-proxy.yaml b/terraform/aws/aws-eks-operator/tailscale-api-server-ha-proxy.yaml new file mode 100644 index 0000000..582abe7 --- /dev/null +++ b/terraform/aws/aws-eks-operator/tailscale-api-server-ha-proxy.yaml @@ -0,0 +1,11 @@ +# https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy#configuring-a-high-availability-api-server-proxy +apiVersion: tailscale.com/v1alpha1 +kind: ProxyGroup +metadata: + name: ${OPERATOR_NAME}-ha +spec: + type: kube-apiserver + replicas: 2 + tags: ["tag:k8s"] + kubeAPIServer: + mode: auth diff --git a/terraform/aws/aws-eks-operator/variables.tf b/terraform/aws/aws-eks-operator/variables.tf new file mode 100644 index 0000000..e9a505f --- /dev/null +++ b/terraform/aws/aws-eks-operator/variables.tf @@ -0,0 +1,21 @@ +variable "tailscale_oauth_client_id" { + 
description = "Tailscale OAuth client ID" + type = string + sensitive = true + + validation { + condition = length(var.tailscale_oauth_client_id) > 0 + error_message = "Tailscale OAuth client ID must not be empty." + } +} + +variable "tailscale_oauth_client_secret" { + description = "Tailscale OAuth client secret" + type = string + sensitive = true + + validation { + condition = length(var.tailscale_oauth_client_secret) > 0 + error_message = "Tailscale OAuth client secret must not be empty." + } +} diff --git a/terraform/aws/aws-eks-operator/versions.tf b/terraform/aws/aws-eks-operator/versions.tf new file mode 100644 index 0000000..482be23 --- /dev/null +++ b/terraform/aws/aws-eks-operator/versions.tf @@ -0,0 +1,48 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 6.0, < 7.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 3.0.1, < 4.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 3.1.1, < 4.0" + } + null = { + source = "hashicorp/null" + version = ">= 3.2.0, < 4.0" + } + } +} + +provider "kubernetes" { + alias = "this" + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } +} + +provider "helm" { + alias = "this" + kubernetes = { + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) + + exec = { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } + } +}