provider "aws" {
region = var.region
}
module "label" {
source = "git::https://github.com/cloudposse/terraform-null-label.git?ref=tags/0.19.2"
# This is the preferred way to add attributes. It will put "cluster" first
# before any attributes set in `var.attributes` or `context.attributes`.
# In this case, we do not care, because we are only using this instance
# of this module to create tags.
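  # For example, with a hypothetical context of namespace = "eg", stage = "test", name = "app"
  # (not values from this example), module.label.id would resolve to "eg-test-app-cluster".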
  attributes = ["cluster"]
  context    = module.this.context
}

locals {
  # The specific kubernetes.io/cluster/* resource tags below are required
  # for EKS and Kubernetes to discover and manage networking resources.
  # https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html#base-vpc-networking
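  # For example, with a hypothetical module.label.id of "eg-test-cluster", the merge below
  # adds { "kubernetes.io/cluster/eg-test-cluster" = "shared" } to the label module's tags.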
  tags = merge(module.label.tags, { "kubernetes.io/cluster/${module.label.id}" = "shared" })

  # Unfortunately, the `most_recent` variable
  # (https://github.com/cloudposse/terraform-aws-eks-workers/blob/34a43c25624a6efb3ba5d2770a601d7cb3c0d391/main.tf#L141)
  # does not work as expected. If you are not using a custom AMI, set the
  # `eks_worker_ami_name_filter` variable to pin the correct Kubernetes version for the EKS workers;
  # otherwise the workers will run the first Kubernetes version AWS supported for EKS (v1.11),
  # while the EKS control plane runs the version specified by the `kubernetes_version` variable.
  eks_worker_ami_name_filter = "amazon-eks-node-${var.kubernetes_version}*"
}
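
# A minimal sketch (not used elsewhere in this example) of how the name filter above
# could be resolved to a concrete AMI. The "amazon" owner and the data source name
# "eks_worker" are assumptions for illustration, not part of the upstream module.
# data "aws_ami" "eks_worker" {
#   most_recent = true
#   owners      = ["amazon"]
#
#   filter {
#     name   = "name"
#     values = [local.eks_worker_ami_name_filter]
#   }
# }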
module "vpc" {
source = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=tags/0.17.0"
cidr_block = "172.16.0.0/16"
tags = local.tags
context = module.this.context
}
module "subnets" {
source = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.28.0"
availability_zones = var.availability_zones
vpc_id = module.vpc.vpc_id
igw_id = module.vpc.igw_id
cidr_block = module.vpc.vpc_cidr_block
nat_gateway_enabled = false
nat_instance_enabled = false
tags = local.tags
context = module.this.context
}
module "eks_cluster" {
source = "git::https://github.com/cloudposse/terraform-aws-eks-cluster.git?ref=tags/0.28.0"
region = var.region
vpc_id = module.vpc.vpc_id
subnet_ids = module.subnets.public_subnet_ids
kubernetes_version = var.kubernetes_version
local_exec_interpreter = var.local_exec_interpreter
oidc_provider_enabled = var.oidc_provider_enabled
enabled_cluster_log_types = var.enabled_cluster_log_types
cluster_log_retention_period = var.cluster_log_retention_period
context = module.this.context
}

# Ensure ordering of resource creation to eliminate race conditions when applying the Kubernetes Auth ConfigMap.
# Do not create the Node Group before the EKS cluster is created and the `aws-auth` Kubernetes ConfigMap is applied.
# Otherwise, EKS will create the ConfigMap first and add the managed node role ARNs to it,
# and the Kubernetes provider will throw an error that the ConfigMap already exists (it can only create the map, not update it).
# If we create the ConfigMap first (to add additional roles/users/accounts), EKS will simply update it by adding the managed node role ARNs.
data "null_data_source" "wait_for_cluster_and_kubernetes_configmap" {
  inputs = {
    cluster_name             = module.eks_cluster.eks_cluster_id
    kubernetes_config_map_id = module.eks_cluster.kubernetes_config_map_id
  }
}
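
# On Terraform 0.13+, the same ordering could be expressed more directly, since module
# blocks support `depends_on` (a sketch, assuming nothing else consumes these outputs):
#
#   module "eks_node_group" {
#     # ...
#     cluster_name = module.eks_cluster.eks_cluster_id
#     depends_on   = [module.eks_cluster]
#   }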
module "eks_node_group" {
source = "../../"
subnet_ids = module.subnets.public_subnet_ids
cluster_name = data.null_data_source.wait_for_cluster_and_kubernetes_configmap.outputs["cluster_name"]
instance_types = var.instance_types
desired_size = var.desired_size
min_size = var.min_size
max_size = var.max_size
kubernetes_version = var.kubernetes_version
kubernetes_labels = var.kubernetes_labels
disk_size = var.disk_size
bootstrap_extra_args = var.bootstrap_extra_args
kubelet_extra_args = var.kubelet_extra_args
before_cluster_joining_userdata = var.before_cluster_joining_userdata
after_cluster_joining_userdata = var.after_cluster_joining_userdata
context = module.this.context
}
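
# A hypothetical output (not part of the upstream example) to surface the node group's
# provisioning status after `terraform apply`; `eks_node_group_status` is assumed to be
# an output exposed by the node group module.
# output "eks_node_group_status" {
#   value = module.eks_node_group.eks_node_group_status
# }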