#!/bin/bash

# ANSI color codes used by logmessage (rendered via `echo -e`)
GREEN='\033[1;32m'
RED='\033[1;31m'
PURPLE='\033[1;35m'
NC='\033[0m' # No Color

# Make sure we run from a consistent directory
cd ~

# Keep a timestamped log of this run under /eks-workshop/logs
mkdir -p /eks-workshop/logs
log_file=/eks-workshop/logs/action-$(date +%s).log
# Show the workshop banner if the environment provides one
if [ -f /home/ec2-user/.banner-text ]; then
  cat /home/ec2-user/.banner-text
fi

# FD 7 mirrors the original stdout so logmessage output still reaches
# the console even if stdout is later redirected elsewhere
exec 7>&1
# Print a message to both the saved console stream (FD 7) and stdout.
# -e enables escape interpretation so callers can embed colors and '\n'.
logmessage () {
  echo -e "$@" >&7
  echo -e "$@" >&1
}
# Export so child bash processes (hooks, subshells) can log consistently
export -f logmessage
1625
# Guard: sizing variables are exported by 'use-cluster'; bail out early
# with a helpful hint if they are missing
if [ -z "$EKS_DEFAULT_MNG_MIN" ]; then
  EKS_CLUSTER_NAME=${EKS_CLUSTER_NAME:-"eks-workshop"}

  logmessage "${RED}Error:${NC} Please run 'use-cluster $EKS_CLUSTER_NAME'"
  exit 1
fi
3140
@@ -44,22 +53,24 @@ trap 'catch $? $LINENO' EXIT
4453
# EXIT trap handler: $1 is the script's exit status. On failure, point
# the user at the proctor/issue tracker and the captured log file.
catch () {
  if [ "$1" != "0" ]; then
    logmessage "🚨 An ${RED}error${NC} occurred, please contact your workshop proctor or raise an issue at https://github.com/aws-samples/eks-workshop-v2/issues"
    logmessage "The full log can be found here: $log_file"
  fi
  # Close FD 3 (presumably opened later for the log tee -- TODO confirm)
  exec 3<&-
}
5261
5362mkdir -p /eks-workshop
5463
64+ logmessage " 🚀 We're preparing your environment for the next lab, sit tight!"
65+
5566REPOSITORY_REF=${REPOSITORY_REF:- " " }
5667
5768if [ ! -z " ${REPOSITORY_REF} " ]; then
5869 rm -f /home/ec2-user/environment/eks-workshop
5970 rm -rf $manifests_path
6071 rm -rf $repository_path
6172
62- logmessage " Refreshing copy of workshop repository from GitHub..."
73+ logmessage " 📦 Refreshing copy of workshop repository from GitHub..."
6374
6475 git clone --depth=1 --single-branch --branch=${REPOSITORY_REF} --quiet https://github.com/$REPOSITORY_OWNER /$REPOSITORY_NAME .git $repository_path
6576
@@ -83,8 +94,7 @@ if [ ! -z "$module" ]; then
8394 fi
8495fi
8596
86- logmessage " Resetting the environment..."
87- logmessage " Tip: Read the rest of the lab introduction while you wait!"
97+ logmessage " 🎓 ${PURPLE} Tip:${NC} Read the rest of the lab introduction while you wait!"
8898
8999if [ -f " /eks-workshop/hooks/cleanup.sh" ]; then
90100 bash /eks-workshop/hooks/cleanup.sh
@@ -108,7 +118,7 @@ kubectl apply -k $base_path --prune --all \
108118 --prune-allowlist=scheduling.k8s.io/v1/PriorityClass \
109119 --prune-allowlist=networking.k8s.io/v1/Ingress
110120
111- logmessage " Waiting for application to become ready..."
121+ logmessage " \n⏳ Waiting for application to become ready..."
112122
113123sleep 10
114124
@@ -129,7 +139,7 @@ export TF_VAR_eks_cluster_id="$EKS_CLUSTER_NAME"
129139
RESOURCES_PRECREATED=${RESOURCES_PRECREATED:-"false"}

logmessage "\n🔃 Cleaning up previous lab infrastructure..."

# Shared Terraform workspace path, expressed relative to the current directory
tf_dir=$(realpath --relative-to="$PWD" '/eks-workshop/terraform')
@@ -147,7 +157,7 @@ if [ ! -z "$module" ]; then
147157 fi
148158
149159 if [ -f " $module_path /.workshop/terraform/main.tf" ]; then
150- logmessage " Creating infrastructure for next lab..."
160+ logmessage " \n🚧 Creating infrastructure for next lab..."
151161
152162 cp -R $module_path /.workshop/terraform/* /eks-workshop/terraform/lab
153163
@@ -157,7 +167,7 @@ if [ ! -z "$module" ]; then
157167 terraform -chdir=" $tf_dir " apply -refresh=false --auto-approve
158168 elif [ -f " $module_path /.workshop/terraform/addon.tf" ]; then
159169 # This is the deprecated legacy code path that will be removed
160- logmessage " Creating infrastructure for next lab..."
170+ logmessage " \n🚧 Creating infrastructure for next lab..."
161171
162172 cp -R $module_path /.workshop/terraform/* /eks-workshop/terraform
163173
@@ -182,15 +192,15 @@ expected_size_config="$EKS_DEFAULT_MNG_MIN $EKS_DEFAULT_MNG_MAX $EKS_DEFAULT_MNG
182192mng_size_config=$( aws eks describe-nodegroup --cluster-name $EKS_CLUSTER_NAME --nodegroup-name $EKS_DEFAULT_MNG_NAME | jq -r ' .nodegroup.scalingConfig | "\(.minSize) \(.maxSize) \(.desiredSize)"' )
183193
184194if [[ " $mng_size_config " != " $expected_size_config " ]]; then
185- logmessage " Setting EKS Node Group back to initial sizing..."
195+ logmessage " \nSetting EKS Node Group back to initial sizing..."
186196
187197 WAIT_EXIT_CODE=0
188198
189199 # Wait for the node group to be active in case previous module cleanup didn't complete for some reason
190200 aws eks wait nodegroup-active --cluster-name $EKS_CLUSTER_NAME --nodegroup-name $EKS_DEFAULT_MNG_NAME || WAIT_EXIT_CODE=$?
191201
192202 if [ $WAIT_EXIT_CODE -ne 0 ]; then
193- logmessage " Default node group is still being modified unexpectedly, please try again."
203+ logmessage " ${RED} Error: ${NC} Default node group is still being modified unexpectedly, please try again."
194204 logmessage " If this error persists please contact your workshop proctor or raise an issue at https://github.com/aws-samples/eks-workshop-v2/issues"
195205 exit 1
196206 fi
205215asg_size_config=$( aws autoscaling describe-auto-scaling-groups --filters " Name=tag:eks:nodegroup-name,Values=$EKS_DEFAULT_MNG_NAME " " Name=tag:eks:cluster-name,Values=$EKS_CLUSTER_NAME " | jq -r ' .AutoScalingGroups[0] | "\(.MinSize) \(.MaxSize) \(.DesiredCapacity)"' )
206216
207217if [[ " $asg_size_config " != " $expected_size_config " ]]; then
208- logmessage " Setting ASG back to initial sizing..."
218+ logmessage " \nSetting ASG back to initial sizing..."
209219
210220 export ASG_NAME=$( aws autoscaling describe-auto-scaling-groups --filters " Name=tag:eks:nodegroup-name,Values=$EKS_DEFAULT_MNG_NAME " " Name=tag:eks:cluster-name,Values=$EKS_CLUSTER_NAME " --query " AutoScalingGroups[0].AutoScalingGroupName" --output text)
211221 aws autoscaling update-auto-scaling-group \
@@ -223,7 +233,7 @@ timeout -s TERM 300 bash -c \
223233 done' || EXIT_CODE=$?
224234
225235if [ $EXIT_CODE -ne 0 ]; then
226- >&2 echo " Error: Nodes did not scale back to 3"
236+ >&2 echo " ${RED} Error:${NC} Nodes did not scale back to 3"
227237 exit 1
228238fi
229239
@@ -234,4 +244,4 @@ kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop -l app.kubernete
# Wait for workshop pods to settle after the environment reset
kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A

# Finished
logmessage "\n✅ Environment is ${GREEN}ready${NC}!"
0 commit comments