You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
enhancement: Add spot instance and managed node group support (#200)
* enhancement: Add spot instance and managed node group support
* chore: fix references to map functions removed in tf 0.15
* fix: Pin the terraform version in the validation gha workflow because of issues with submodules when using TF 0.15
Copy file name to clipboardExpand all lines: templates/kubernetes/terraform/environments/prod/main.tf
+4Lines changed: 4 additions & 0 deletions
Original file line number
Diff line number
Diff line change
@@ -106,4 +106,8 @@ module "kubernetes" {
106
106
# Should not be less than 2 for production. 2 can handle a significant amount of traffic and should give a reasonable amount of redundancy in the case of
107
107
# needing to do deployments of the controller or unexpected termination of a node with a controller pod on it.
108
108
nginx_ingress_replicas=2
109
+
110
+
# The Node Termination Handler should be enabled when using spot instances in your cluster, as it is responsible for gracefully draining a node that is due to be terminated.
111
+
# It can also be used to cleanly handle scheduled maintenance events on On-Demand instances, though it runs as a daemonset, so will run 1 pod on each node in your cluster.
Copy file name to clipboardExpand all lines: templates/kubernetes/terraform/environments/stage/main.tf
+4Lines changed: 4 additions & 0 deletions
Original file line number
Diff line number
Diff line change
@@ -103,4 +103,8 @@ module "kubernetes" {
103
103
cache_store="<% index .Params `cacheStore` %>"
104
104
105
105
nginx_ingress_replicas=1
106
+
107
+
# The Node Termination Handler should be enabled when using spot instances in your cluster, as it is responsible for gracefully draining a node that is due to be terminated.
108
+
# It can also be used to cleanly handle scheduled maintenance events on On-Demand instances, though it runs as a daemonset, so will run 1 pod on each node in your cluster.
description="The Node Termination Handler should be enabled when using spot instances in your cluster, as it is responsible for gracefully draining a node that is due to be terminated. It can also be used to cleanly handle scheduled maintenance events on On-Demand instances, though it runs as a daemonset, so will run 1 pod on each node in your cluster"
Copy file name to clipboardExpand all lines: templates/terraform/environments/prod/main.tf
+13-14Lines changed: 13 additions & 14 deletions
Original file line number
Diff line number
Diff line change
@@ -29,7 +29,7 @@ provider "aws" {
29
29
allowed_account_ids=[local.account_id]
30
30
}
31
31
32
-
# remote state of "shared"
32
+
# remote state of "shared" - contains mostly IAM users that will be shared between environments
33
33
data"terraform_remote_state""shared" {
34
34
backend="s3"
35
35
config={
@@ -56,14 +56,11 @@ module "prod" {
56
56
ecr_repositories=[] # Should be created by the staging environment
57
57
58
58
# EKS configuration
59
-
eks_cluster_version="1.18"
60
-
eks_worker_instance_type="t3.medium"
61
-
eks_worker_asg_min_size=2
62
-
eks_worker_asg_max_size=4
63
-
64
-
# EKS-Optimized AMI for your region: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html
65
-
# https://<% index .Params `region` %>.console.aws.amazon.com/systems-manager/parameters/%252Faws%252Fservice%252Feks%252Foptimized-ami%252F1.18%252Famazon-linux-2%252Frecommended%252Fimage_id/description?region=<% index .Params `region` %>
66
-
eks_worker_ami="<% index .Params `eksWorkerAMI` %>"
59
+
eks_cluster_version="1.19"
60
+
eks_worker_instance_types=["t3.medium"]
61
+
eks_worker_asg_min_size=2
62
+
eks_worker_asg_max_size=4
63
+
eks_use_spot_instances=false
67
64
68
65
# Hosting configuration. Each domain will have a bucket created for it, but may have multiple aliases pointing to the same bucket.
69
66
# Note that because of the way terraform handles lists, new records should be added to the end of the list.
@@ -101,11 +98,11 @@ module "prod" {
101
98
102
99
# Logging configuration
103
100
logging_type="<% index .Params `loggingType` %>"
104
-
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_es_version = "7.9"
105
-
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_az_count = "2"
106
-
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_es_instance_type = "m5.large.elasticsearch"
107
-
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_es_instance_count = "2" # Must be a multiple of the az count
108
-
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_volume_size_in_gb = "50" # Maximum value is limited by the instance type
101
+
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_es_version = "7.9"
102
+
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_az_count = "2"
103
+
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_es_instance_type = "t2.medium.elasticsearch" # The next larger instance type is "m5.large.elasticsearch" - upgrading an existing cluster may require fully recreating though, as m5.large is the first instance size which supports disk encryption
104
+
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_es_instance_count = "2" # Must be a multiple of the az count
105
+
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_volume_size_in_gb = "35" # Maximum value is limited by the instance type
109
106
<% if ne (index .Params `loggingType`) "kibana"%># <% end %>logging_create_service_role = false # If in the same AWS account, this would have already been created by the staging env
110
107
# See https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-limits.html
111
108
@@ -118,9 +115,11 @@ module "prod" {
118
115
## Check https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/SelectEngine.html to compare redis or memcached.
119
116
cache_store="<% index .Params `cacheStore` %>"
120
117
118
+
<% if ne (index .Params `cacheStore`) "none"%>
121
119
## See how to define node and instance type: https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/nodes-select-size.html
0 commit comments