-
Notifications
You must be signed in to change notification settings - Fork 0
/
clusters.tf
264 lines (199 loc) · 9.5 KB
/
clusters.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
/*
This file contains all the resources needed to maintain the GKE clusters and their nodes.
GKE Shielded nodes:
https://cloud.google.com/kubernetes-engine/docs/how-to/shielded-gke-nodes
*/
######## Production
# Create a GKE cluster for production
# Production GKE cluster: VPC-native, private-node, Shielded-node cluster with
# Workload Identity and Google Groups RBAC enabled.
resource "google_container_cluster" "production" {
provider = google-beta
name = local.production_cluster_name
location = var.region
project = google_project.production.project_id
network = google_compute_network.main-vpc.self_link
subnetwork = google_compute_subnetwork.production.self_link
# The default node pool is removed; nodes are managed by the separate
# google_container_node_pool.production_node_pool resource in this file.
remove_default_node_pool = true
initial_node_count = local.production_node_pool_initial_node_count
enable_shielded_nodes = true
# GKE release channels:
# https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels
release_channel {
channel = local.production_cluster_release_channel
}
# Allows Kubernetes service accounts to act as a user-managed Google IAM service account.
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#workload_identity_config
workload_identity_config {
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#identity_namespace
# NOTE(review): `identity_namespace` is deprecated in newer google/google-beta
# provider versions in favor of `workload_pool` — confirm the pinned provider
# version before upgrading.
identity_namespace = "${google_project.production.project_id}.svc.id.goog"
}
# Define the secondary IP ranges used for Pods and Services (VPC-native cluster).
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#ip_allocation_policy
ip_allocation_policy {
cluster_secondary_range_name = local.production_pod_ip_range_name
services_secondary_range_name = local.production_services_ip_range_name
}
# GKE private cluster:
# in a private cluster, nodes only have internal IP addresses,
# which means that nodes and Pods are isolated from the internet by default.
# https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters
private_cluster_config {
enable_private_nodes = true
# The control-plane endpoint stays publicly reachable (access is restricted
# below via master_authorized_networks_config).
enable_private_endpoint = false
# Specifies an internal IP address range for the control plane.
# This setting is permanent for this cluster.
master_ipv4_cidr_block = local.production_master_ipv4_cidr_block
}
# https://cloud.google.com/kubernetes-engine/docs/how-to/authorized-networks
# Restricts access to the public control-plane endpoint to the IP address
# ranges authorized here.
#
# Nodes and containers are already shielded from the internet, so the
# configured range can be broad — the actual value comes from locals.
master_authorized_networks_config {
cidr_blocks {
cidr_block = local.production_master_authorized_networks_config_1_cidr_block
display_name = local.production_master_authorized_networks_config_1_display_name
}
}
# Configuration for the Google Groups for GKE (group-based RBAC) feature.
# https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#groups-setup-gsuite
authenticator_groups_config {
security_group = var.production_gke_security_group_name
}
}
# Node pool for the production cluster: autoscaled, auto-repaired/upgraded,
# Shielded (secure boot) COS-containerd nodes exposing the GKE metadata server
# for Workload Identity.
resource "google_container_node_pool" "production_node_pool" {
provider = google-beta
name = "${local.production_cluster_name}-node-pool"
location = var.region
initial_node_count = local.production_node_pool_initial_node_count
cluster = google_container_cluster.production.name
project = google_project.production.project_id
# Node-count bounds for the cluster autoscaler.
autoscaling {
min_node_count = local.production_node_pool_autoscaling_min_node_count
max_node_count = local.production_node_pool_autoscaling_max_node_count
}
management {
auto_repair = local.production_node_pool_auto_repair
auto_upgrade = local.production_node_pool_auto_upgrade
}
# Surge upgrade settings: how many extra / unavailable nodes are allowed
# during a node-pool upgrade.
upgrade_settings {
max_surge = local.production_node_pool_max_surge
max_unavailable = local.production_node_pool_max_unavailable
}
node_config {
# https://cloud.google.com/compute/docs/machine-types#recommendations_for_machine_types
machine_type = local.production_node_pool_machine_type
image_type = "COS_CONTAINERD"
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#oauth_scopes
oauth_scopes = local.production_node_pool_oauth_scopes
shielded_instance_config {
enable_secure_boot = true
}
# With Workload Identity, you can configure a Kubernetes service account to act as a Google service account.
#
# Pods running as the Kubernetes service account will automatically authenticate as the Google service account
# when accessing Google Cloud APIs.
#
# This enables you to assign distinct, fine-grained identities and authorization for each application in your cluster.
#
# The service account is referred to in the cluster's workload_identity_config.
#
# NOTE(review): `node_metadata` is deprecated in newer google/google-beta
# provider versions in favor of `mode = "GKE_METADATA"` — confirm the pinned
# provider version before upgrading.
workload_metadata_config {
node_metadata = "GKE_METADATA_SERVER"
}
}
}
######## Test
# Create a GKE cluster for test
# Test GKE cluster: mirrors the production cluster configuration (VPC-native,
# private-node, Shielded-node, Workload Identity, Google Groups RBAC) but uses
# the test project, subnetwork, and locals.
resource "google_container_cluster" "test" {
provider = google-beta
name = local.test_cluster_name
location = var.region
project = google_project.test.project_id
network = google_compute_network.main-vpc.self_link
subnetwork = google_compute_subnetwork.test.self_link
# The default node pool is removed; nodes are managed by the separate
# google_container_node_pool.test_node_pool resource in this file.
remove_default_node_pool = true
initial_node_count = local.test_node_pool_initial_node_count
enable_shielded_nodes = true
# GKE release channels:
# https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels
release_channel {
channel = local.test_cluster_release_channel
}
# Allows Kubernetes service accounts to act as a user-managed Google IAM service account.
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#workload_identity_config
workload_identity_config {
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#identity_namespace
# NOTE(review): `identity_namespace` is deprecated in newer google/google-beta
# provider versions in favor of `workload_pool` — confirm the pinned provider
# version before upgrading.
identity_namespace = "${google_project.test.project_id}.svc.id.goog"
}
# Define the secondary IP ranges used for Pods and Services (VPC-native cluster).
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#ip_allocation_policy
ip_allocation_policy {
cluster_secondary_range_name = local.test_pod_ip_range_name
services_secondary_range_name = local.test_services_ip_range_name
}
# GKE private cluster:
# in a private cluster, nodes only have internal IP addresses,
# which means that nodes and Pods are isolated from the internet by default.
# https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters
private_cluster_config {
enable_private_nodes = true
# The control-plane endpoint stays publicly reachable (access is restricted
# below via master_authorized_networks_config).
enable_private_endpoint = false
# Specifies an internal IP address range for the control plane.
# This setting is permanent for this cluster.
master_ipv4_cidr_block = local.test_master_ipv4_cidr_block
}
# https://cloud.google.com/kubernetes-engine/docs/how-to/authorized-networks
# Restricts access to the public control-plane endpoint to the IP address
# ranges authorized here.
#
# Nodes and containers are already shielded from the internet, so the
# configured range can be broad — the actual value comes from locals.
master_authorized_networks_config {
cidr_blocks {
cidr_block = local.test_master_authorized_networks_config_1_cidr_block
display_name = local.test_master_authorized_networks_config_1_display_name
}
}
# Configuration for the Google Groups for GKE (group-based RBAC) feature.
# https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#groups-setup-gsuite
authenticator_groups_config {
security_group = var.test_gke_security_group_name
}
}
# Node pool for the test cluster: mirrors the production node pool (autoscaled,
# auto-repaired/upgraded, Shielded COS-containerd nodes with the GKE metadata
# server for Workload Identity) but uses the test project and locals.
resource "google_container_node_pool" "test_node_pool" {
provider = google-beta
name = "${local.test_cluster_name}-node-pool"
location = var.region
initial_node_count = local.test_node_pool_initial_node_count
cluster = google_container_cluster.test.name
project = google_project.test.project_id
# Node-count bounds for the cluster autoscaler.
autoscaling {
min_node_count = local.test_node_pool_autoscaling_min_node_count
max_node_count = local.test_node_pool_autoscaling_max_node_count
}
management {
auto_repair = local.test_node_pool_auto_repair
auto_upgrade = local.test_node_pool_auto_upgrade
}
# Surge upgrade settings: how many extra / unavailable nodes are allowed
# during a node-pool upgrade.
upgrade_settings {
max_surge = local.test_node_pool_max_surge
max_unavailable = local.test_node_pool_max_unavailable
}
node_config {
# https://cloud.google.com/compute/docs/machine-types#recommendations_for_machine_types
machine_type = local.test_node_pool_machine_type
image_type = "COS_CONTAINERD"
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#oauth_scopes
oauth_scopes = local.test_node_pool_oauth_scopes
shielded_instance_config {
enable_secure_boot = true
}
# With Workload Identity, you can configure a Kubernetes service account to act as a Google service account.
#
# Pods running as the Kubernetes service account will automatically authenticate as the Google service account
# when accessing Google Cloud APIs.
#
# This enables you to assign distinct, fine-grained identities and authorization for each application in your cluster.
#
# The service account is referred to in the cluster's workload_identity_config.
#
# NOTE(review): `node_metadata` is deprecated in newer google/google-beta
# provider versions in favor of `mode = "GKE_METADATA"` — confirm the pinned
# provider version before upgrading.
workload_metadata_config {
node_metadata = "GKE_METADATA_SERVER"
}
}
}