From 04243a12340f1f1994e17eb576dbab906a75e5c8 Mon Sep 17 00:00:00 2001 From: Eric Kong Date: Thu, 7 Aug 2025 23:25:03 -0700 Subject: [PATCH 1/2] added corbench code without manifest files Signed-off-by: Eric Kong --- .gitignore | 6 + README.md | 19 +- corbench/Dockerfile | 16 + corbench/Makefile | 105 ++ corbench/README.md | 16 + corbench/docs/Blank diagram (2).png | Bin 0 -> 83980 bytes corbench/docs/deployment.md | 417 +++++++ corbench/gp2-csi-storageclass.yaml | 10 + corbench/infra/Dockerfile | 18 + corbench/infra/README.md | 98 ++ corbench/infra/infra.go | 91 ++ corbench/pkg/provider/eks/eks.go | 589 +++++++++ corbench/pkg/provider/k8s/k8s.go | 1557 ++++++++++++++++++++++++ corbench/pkg/provider/provider.go | 146 +++ corbench/pkg/provider/provider_test.go | 59 + corbench/setup-ebs-csi.sh | 104 ++ scripts/README.md | 2 + 17 files changed, 3252 insertions(+), 1 deletion(-) create mode 100644 corbench/Dockerfile create mode 100644 corbench/Makefile create mode 100644 corbench/README.md create mode 100644 corbench/docs/Blank diagram (2).png create mode 100644 corbench/docs/deployment.md create mode 100644 corbench/gp2-csi-storageclass.yaml create mode 100644 corbench/infra/Dockerfile create mode 100644 corbench/infra/README.md create mode 100644 corbench/infra/infra.go create mode 100644 corbench/pkg/provider/eks/eks.go create mode 100644 corbench/pkg/provider/k8s/k8s.go create mode 100644 corbench/pkg/provider/provider.go create mode 100644 corbench/pkg/provider/provider_test.go create mode 100755 corbench/setup-ebs-csi.sh diff --git a/.gitignore b/.gitignore index aaadf73..a29378d 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,9 @@ go.work.sum # Editor/IDE # .idea/ # .vscode/ + +# Authentication file +auth_file.yaml + +# Infra binary +corbench/infra/infra diff --git a/README.md b/README.md index 972b492..f254706 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,21 @@ # Corbench Benchmarking tool for cortex -Refer to [corbench/README.md](corbench/README.md) 
+Refer to [corbench/README.md](corbench/README.md) for documetation on how to deploy the tool, and how the tool works. + +## Tool Usage + +Create a PR in the Cortex repository, and you are able to run the following commands in the comments of a PR + +**Available Commands:** +- To start benchmark: `/corbench ` +- To stop benchmark: `/corbench cancel` +- To print help: `/corbench help` + +**Cortex tag format:** `master-` (e.g., `master-6b3bd7b`) This will be the version of cortex that your PR will be benchmarked against +- MAKE SURE TO PICK AN EXISTING CORTEX TAG FROM BELOW LINK, OR DEPLOYMENT WILL BE STUCK AND YOU WILL HAVE TO CANCEL AND START AGAIN +- Check what cortex tags are avaliable to choose from at https://hub.docker.com/r/cortexproject/cortex/tags + +**Examples:** +- `/corbench master-6b3bd7b` +- `/corbench master-9861229` \ No newline at end of file diff --git a/corbench/Dockerfile b/corbench/Dockerfile new file mode 100644 index 0000000..44840da --- /dev/null +++ b/corbench/Dockerfile @@ -0,0 +1,16 @@ +FROM --platform=linux/amd64 golang:1.15-alpine + +WORKDIR /corbench + +RUN apk add git make + +# Copy Makefiles and manifests +# Need 'cd' since ghActions ignores WORKDIR +# Need 'eval' to prevent bash keywords be run as commands +COPY ./ ./ + +RUN echo -e '#!/bin/sh\ncd /corbench\neval "$@"' >/bin/docker_entrypoint + +RUN chmod u+x /bin/docker_entrypoint + +ENTRYPOINT ["docker_entrypoint"] \ No newline at end of file diff --git a/corbench/Makefile b/corbench/Makefile new file mode 100644 index 0000000..6509591 --- /dev/null +++ b/corbench/Makefile @@ -0,0 +1,105 @@ +INFRA_CMD ?= ./infra/infra +PROVIDER ?= eks +MANIFEST_FILE = c-manifests + +cluster_create: + ${INFRA_CMD} ${PROVIDER} cluster create -a ${AUTH_FILE} \ + -v ZONE:${ZONE} -v GKE_PROJECT_ID:${GKE_PROJECT_ID} \ + -v EKS_WORKER_ROLE_ARN:${EKS_WORKER_ROLE_ARN} -v EKS_CLUSTER_ROLE_ARN:${EKS_CLUSTER_ROLE_ARN} \ + -v EKS_SUBNET_IDS:${EKS_SUBNET_IDS} -v SEPARATOR:${SEPARATOR} \ + -v 
CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \ + -v AWS_ACCOUNT_ID:$$(echo ${EKS_WORKER_ROLE_ARN} | cut -d':' -f5) \ + -f ${MANIFEST_FILE}/cluster_${PROVIDER}.yaml +ifeq (${PROVIDER},eks) + ./setup-ebs-csi.sh +endif + +cluster_resource_apply: + ${INFRA_CMD} ${PROVIDER} resource apply -a ${AUTH_FILE} \ + -v ZONE:${ZONE} -v GKE_PROJECT_ID:${GKE_PROJECT_ID} \ + -v EKS_WORKER_ROLE_ARN:${EKS_WORKER_ROLE_ARN} -v EKS_CLUSTER_ROLE_ARN:${EKS_CLUSTER_ROLE_ARN} \ + -v EKS_SUBNET_IDS:${EKS_SUBNET_IDS} -v SEPARATOR:${SEPARATOR} \ + -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} -v DOMAIN_NAME:${DOMAIN_NAME} -v RELEASE:${RELEASE} \ + -v GRAFANA_ADMIN_PASSWORD:${GRAFANA_ADMIN_PASSWORD} \ + -v SERVICEACCOUNT_CLIENT_EMAIL:${SERVICEACCOUNT_CLIENT_EMAIL} \ + -v OAUTH_TOKEN="$(printf ${OAUTH_TOKEN} | base64 -w 0)" \ + -v WH_SECRET="$(printf ${WH_SECRET} | base64 -w 0)" \ + -v GITHUB_ORG:${GITHUB_ORG} -v GITHUB_REPO:${GITHUB_REPO} \ + -f ${MANIFEST_FILE}/cluster-infra + # Patch secrets immediately after deployment with correct values since for some reason 1_secrets isn't working properly + export AWS_ACCESS_KEY_ID=$$(grep "accesskeyid:" ${AUTH_FILE} | awk '{print $$2}') && \ + export AWS_SECRET_ACCESS_KEY=$$(grep "secretaccesskey:" ${AUTH_FILE} | awk '{print $$2}') && \ + kubectl patch secret oauth-token -p '{"data":{"oauth":"'$$(printf "${OAUTH_TOKEN}" | base64)'"}}' && \ + kubectl patch secret whsecret -p '{"data":{"whsecret":"'$$(printf "${WH_SECRET}" | base64)'"}}' && \ + kubectl rollout restart deployment/comment-monitor + +cluster_delete: + ${INFRA_CMD} ${PROVIDER} cluster delete -a ${AUTH_FILE} \ + -v ZONE:${ZONE} -v GKE_PROJECT_ID:${GKE_PROJECT_ID} \ + -v EKS_WORKER_ROLE_ARN:${EKS_WORKER_ROLE_ARN} -v EKS_CLUSTER_ROLE_ARN:${EKS_CLUSTER_ROLE_ARN} \ + -v EKS_SUBNET_IDS:${EKS_SUBNET_IDS} -v SEPARATOR:${SEPARATOR} \ + -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \ + -v AWS_ACCOUNT_ID:$$(echo ${EKS_WORKER_ROLE_ARN} | cut -d':' -f5) \ + -f 
${MANIFEST_FILE}/cluster_${PROVIDER}.yaml + + +BENCHMARK_DIRECTORY := $(if $(BENCHMARK_DIRECTORY),$(BENCHMARK_DIRECTORY),c-manifests/benchmarks) + +CORBENCH_DIR ?= . + +.PHONY: deploy +deploy: node_create resource_apply + +.PHONY: clean +clean: resource_delete node_delete + +# Default PR_NUMBER to 'default' if not set to avoid invalid nodegroup names +PR_NUMBER ?= default + +node_create: + ${INFRA_CMD} ${PROVIDER} nodes create -a ${AUTH_FILE} \ + -v ZONE:${ZONE} -v GKE_PROJECT_ID:${GKE_PROJECT_ID} \ + -v EKS_WORKER_ROLE_ARN:${EKS_WORKER_ROLE_ARN} -v EKS_CLUSTER_ROLE_ARN:${EKS_CLUSTER_ROLE_ARN} \ + -v EKS_SUBNET_IDS:${EKS_SUBNET_IDS} \ + -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \ + -f ${CORBENCH_DIR}/${BENCHMARK_DIRECTORY}/nodes_${PROVIDER}.yaml + +resource_apply: + $(INFRA_CMD) ${PROVIDER} resource apply -a ${AUTH_FILE} \ + -v ZONE:${ZONE} -v GKE_PROJECT_ID:${GKE_PROJECT_ID} \ + -v CLUSTER_NAME:${CLUSTER_NAME} \ + -v PR_NUMBER:${PR_NUMBER} -v RELEASE:${RELEASE} -v DOMAIN_NAME:${DOMAIN_NAME} \ + -v GITHUB_ORG:${GITHUB_ORG} -v GITHUB_REPO:${GITHUB_REPO} \ + -f ${CORBENCH_DIR}/${BENCHMARK_DIRECTORY}/remote-write + +# Required because namespace and cluster-role are not part of the created nodes +resource_delete: + $(INFRA_CMD) ${PROVIDER} resource delete -a ${AUTH_FILE} \ + -v ZONE:${ZONE} -v GKE_PROJECT_ID:${GKE_PROJECT_ID} \ + -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \ + -f ${CORBENCH_DIR}/${BENCHMARK_DIRECTORY}/remote-write/1c_cluster-role-binding.yaml \ + -f ${CORBENCH_DIR}/${BENCHMARK_DIRECTORY}/remote-write/1a_namespace.yaml + +node_delete: + $(INFRA_CMD) ${PROVIDER} nodes delete -a ${AUTH_FILE} \ + -v ZONE:${ZONE} -v GKE_PROJECT_ID:${GKE_PROJECT_ID} \ + -v EKS_WORKER_ROLE_ARN:${EKS_WORKER_ROLE_ARN} -v EKS_CLUSTER_ROLE_ARN:${EKS_CLUSTER_ROLE_ARN} \ + -v EKS_SUBNET_IDS:${EKS_SUBNET_IDS} \ + -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \ + -f ${CORBENCH_DIR}/${BENCHMARK_DIRECTORY}/nodes_${PROVIDER}.yaml + 
+all_nodes_running: + $(INFRA_CMD) ${PROVIDER} nodes check-running -a ${AUTH_FILE} \ + -v ZONE:${ZONE} -v GKE_PROJECT_ID:${GKE_PROJECT_ID} \ + -v EKS_WORKER_ROLE_ARN:${EKS_WORKER_ROLE_ARN} -v EKS_CLUSTER_ROLE_ARN:${EKS_CLUSTER_ROLE_ARN} \ + -v EKS_SUBNET_IDS:${EKS_SUBNET_IDS} -v SEPARATOR:${SEPARATOR} \ + -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \ + -f ${CORBENCH_DIR}/${BENCHMARK_DIRECTORY}/nodes_${PROVIDER}.yaml + +all_nodes_deleted: + $(INFRA_CMD) ${PROVIDER} nodes check-deleted -a ${AUTH_FILE} \ + -v ZONE:${ZONE} -v GKE_PROJECT_ID:${GKE_PROJECT_ID} \ + -v EKS_WORKER_ROLE_ARN:${EKS_WORKER_ROLE_ARN} -v EKS_CLUSTER_ROLE_ARN:${EKS_CLUSTER_ROLE_ARN} \ + -v EKS_SUBNET_IDS:${EKS_SUBNET_IDS} -v SEPARATOR:${SEPARATOR} \ + -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \ + -f ${CORBENCH_DIR}/${BENCHMARK_DIRECTORY}/nodes_${PROVIDER}.yaml diff --git a/corbench/README.md b/corbench/README.md new file mode 100644 index 0000000..6fc954f --- /dev/null +++ b/corbench/README.md @@ -0,0 +1,16 @@ +# Automated Cortex E2E Testing and Benchmarking + +## Corbench Setup + +To deploy this tool, follow the steps here: [Getting Started](docs/deployment.md) + +# Archetecture + +![Cortex Benchmarking Archetecture](docs/Blank%20diagram%20(2).png) + +1. START: User opens a Github PR in the cortex repository and sends a /corbench PR comment to start a benchmark test. Github then makes a POST request with the command as the payload to the Comment Monitor Webhook Server (step 2). +2. The Comment Monitor Webhook Server monitors for /corbench calls from Cortex github PRs. Upon recieving a POST request from a Github PR, the server verifies the validity of the command (i.e. does the command exist?). The server then initiates a dispatch event to the PR (step 3) as well as posts an update comment indicating the status of the tests, links to grafana dashboards, etc, or a warning message if the command was used incorrectly/with wrong syntax. +3. 
The PR runs the dispatch event using the docker image of the Corbench repo code to initiate benchmarks (step 4). +4. Within the Corbench docker image, two versions of Cortex that will be benchmarked are built & deployed in separate Kubernetes nodes to EKS. After this, we can start to deploy the metrics pushers (Avalanche). Avalanche instances will be deployed to EKS and will simulate prometheus instances pushing load to Cortex’s remote write (or other micro-service specific) endpoints with mock data. +6. While the Metrics Pushers push load into the Cortex instances, prometheus will continuously scrape metrics from the Cortex instances and the kubernetes nodes they run on. +7. Grafana displays collected benchmark metrics & github notifier notifies the original pr of results & other info diff --git a/corbench/docs/Blank diagram (2).png b/corbench/docs/Blank diagram (2).png new file mode 100644 index 0000000000000000000000000000000000000000..62490cfd0aeb9ec0bd816d402539f2bb36c78354 GIT binary patch literal 83980 zcmd?RXIN9|8a9eLj7g*X?xH9_OElDb6sbCIjpR;-uLOx{oc=tx3Plk z5Z@~okmY4HEx&b@ zNs#F6Ni39>ic;;_rNlZRdFR@F@npHT!4wtaV7#7 z{IyqEe$42q=qic}$0dMCjbKn*2EGB-_W#fSD&H?UPef*h7;BO~c-Q9QC;^p_i`Vq_TQ`WmB+&Mq@qy2r`KaS}b{B{AsrLC3aL)=uF_ z3opRVqo9;}&r@{6?dY1EYJ(#t{A_fApr67YDnm>OZ>cNQK5cJML)kl$G&X)^h;AEl zT$jDYYzJO;rue<9`ST{Wyy(&mt;r$+&y~)yx*(a>a4^_xH|e?Gnt99pO`@w~@o*Ji zBUE9;<%9>7hnCzPw|W?N2cSIOiY|%;Q*d{W8ytz=R{G$bixisQMIV~hR~KD*Wo%<3 znPzzq<(Pg)WT;{{cFH)I`bc-uhn^K~7X4vt%T`&*52njeC)LVD|1ZY*@wAz#Zd$8{ zrTG85kC@qX2#go5uXyJ6TF3l`j4_oFn%rylo|#6u8Qw6?`tnxM zwr@m8gu8TyhxwmABz5n>8T@IPA89ejb|XIA*o4zfXW^z+2o$8qV)9~KW^Ucs@QC!#V53GZmjY4PM~u1f7hlVForTo16ORbWje zl;s!lic!BQi63UKdMv@Tn%a-^qLdoWO5J-5rdY$-0dMYf&y7@e)6krR*OqxWhv~GS z5(W8|e~na<@@FjUTSgM!sY_F7afgp~u&sRzL-||8W)~2iyJ25nFrj-SVGleCv{d8p zq}=TBE$f1b?K-xsxfN4$q!YKsIqM`k+4ge$O#8JglhraXBB(h~XQpYy^3`E8e7j!h zi}bZOih@rAXiBWB>7@*7IBgW`Je50CaSK|FP&$%IR}{PP>`GZfpiY^L!(_IKe#pTI z*E0NrwA@UB?tFK7#l44M?sSHvGQ8cwL^q&iXAsh;GGoe1Z&Qvmx+#*F{dppqX}ooK 
z{_aljEv|saWE%2OeIHO(1c>8}{Uno-??@(MAHREJ74YFIphEM6c8;;Ox-Y9Tt;83k ze#g}7;MqP(q7GYM4T`is9=fk)v@tetb)KK;xqQPO%o+bCu#aQusIo+A6R;-`n<{N? zs?SCm8Y5=6QA7E6lSSf^`yNnnPKDV+Z|RN{+cac~%)GLdxpT9uqq`@9q6!lbYn($a zX2z4eSY6j#r>{JVnE=F}8&TTi)W#2I)`?wO7?1E=|7ka(q&Lz{c=@iIQ0(@ey1e09 zY_6S6{7>f<&)M0B48VGpY`3e|EW{X#E39$n^fi%#k|&Tbyz1lFfR|6N@#f&Qj10mp zKrnS}_*nT!%ShvIU{f&fB|@~NX2ZRXK) zH0Hh%aH{Y1@i_4#womlKqkZ{m1&rX$x_*ewE#o@^yY<@EmbJVO@ALy_(G>$90$Mxf zE#&=b2PjUZ%qT-(6CBo!YT{qY%S1uAJoAbb#puAdF*SVt+A+M5(5x|S83ULzbXDc0 zgxN^G5kke}Hem)>)MF}Bw64~hz>_Ksvoy-bhR@6+XfHflRwtfDCBB5F_Quk6c2*&F zs};YJ}J;3%>}LZf;oTo%5hjDfCAd_4DoZQ90G% z;u5eaF4bdx9{p)AC}+vV=r*p+!yfWnL4ER_u3bu^NWp?{hKs%>!yM$##H>Ref0V4X32DkK8alP}-+wpd0Yp)#`FP2u4Bu+s^R+ z^}PFlPJ7*&3;VYp&~APPo;JoUDxaTCIK)1{NmISwerCqoMaGU>XFZ$J>lAC1=yfip zU~BqkJsj#@VJuJkLvUAUP0sD!4+XCbWUfR5i(D7mP=`Z)TRNbTPyH_kTpF`2(yNwIZ^SFrzh z>X2>w17AUn340CxtFn|QJ|L1_?Ja#Tr8zf>hdQgFgZ94uLEAGap>xJ`lIN74rnXZn z68m%=@vZK;IHf?*jul@~!vGKLT8Rh)oY2GW_na*xygEE@^NZ-hzuSm7P+P=Lv;CKa zdqxg|5;lw{R;-|#y;E16K2kP1V<;3u~KVjsnr9958KBz&Z4Lfv_h0KJXD>^P&s?X7kSqm6USM zdY}#q3x2ljFZyo78UD=ze=j_yg*q?K0a_QC=D%tE(Vc_`flB|M5)N&}0>N?Q1In`c z@5&mYmf^S!TwylJ1DmwTweX9q&&5st%>{8w8rF!(?M!$2D^aN__>_vx5oWFV12x{h zqJO>xC|JuVf))=7#jY&E3#k1LgeE5h4AsRny1%)d{QirepPgCyTli?1r$zi?jhJNw z!L#RvYT;)_f26dpx-67_0o*kGx3v+^a=X|NP2|5mmASi;Ms z3-Rp_3@#(T-aBt&Bnq5gJ@v-8VAxYh=}59G{MWY(>#Kk%8Uu06K<2M8F^N%1&C6VR zPo2nCQwk%ky=AqIv;gi;E*SaU{eS$c``;J|7+9g~F*RLO4&AYN|E{n0O+fqzZH~qW z;EVvzmO5DMN)9DvBTtg{cxtT;c>+~86USQ|*a`kzX#G^SJ1HohGf{fYzLNT0SK&w& z3A)fo#Wgb+9!Lv>9QCWWiHR+~$KF)ktd~V)iX<5k&QC0UO9gW<+-zewpG_)=T}t#c zYL*r{5j%xz1ccx*YzSuVYF-Nmih(%ZGv|;t{%YRBn-S}pd?(7G>E&4|dL$Fe;gUT* z50-&p#MQvg5|c#eV?uzQsF)v1Go1f^%TX-?MPwk0(_Hy(0?e#f2Q0pC4ITqm)Albs zYMABOmTFpdOP-nRbzZBt~uabB0iuTS<(|9!4PDkExTtMDp!c+TCT?!@~0+)$) zq+T!(EUGY@l;!C@n-(ttkwu~dI^N0FN|eyv{A@z$xtz<2C-C9xz3Oj-v+g4=b@O!AuB%(p)N993VXR?u=>&$=2ZS74s^ zc}W0;JV?qUne7Whta|kq1$~DR&Q7zMlc0@incAHEK)+y!g0jU59_;yazd7&dtY;J< 
z=v~lLAc+B8mf_7Ro$p8P0|5w>KyBvGJ!RC2ci7g?r9?iAkwq6K_;wu68kAUdxit@( zu^{X@*HGABKn4On;|(o3DotsIN2l76<$|iw)Gm@05e>771@ke-q2z}IRE6@rNv!QL zdVz1_0Rbpz;2NdG=~A1Sb9I?qac!P4yam#o81#UF>fR2c%v2F*(OyvqLXc|EmyROB zhjJja0F>}55{POyRrBc{C5QG)J@rp%i(!DZ4*BBtv`Fm9g)31wBC3B@>Ia)7&12l*~!9rO<8+wm;FnRX5o8Eo~T zbff^5PmHpYiQxK4Q}A@P1buz@;C)b1NGAV_E1I&{~_Kl z5FwZ0m5tUU9}^`SF|mkijpK*A@lQ~BP5;QS1{7Kxzh8iSFEwM3?}J}Y^5bfKDLhU z0PRS0fIJ!;yz|;E?TUlZ+yGZK!^V*GqGaY?48iTRufy2~kq~B%4j>8SYKliBVNVDs8>D6^A{VC%ZM{OLtkRJOm2{Sq2sA*41{UDIv1 zuWbldzt`WoNatoFqGa z_9crCww{JqszFzl!8;~UzL77@=$aML9(rfW&#uz@b*Jl0X?Mq=?ra@!=?-MjbNq#a ziYFAibM}EAt*jE%(B3CrU+XIh*J9*YsPis=4QPMaT0;WevvKOlPp=*k&-6h2<6=NC zZIUf+@N{hYovw0e1nV;AVNhaHaH?!a3XwP46BLlO(>WFHs>w)KAAM|=a+aS&7%x?L ze7yPMm|Kf^_rLToGo&$GPQaUz8C^?Jc|7t3$GM<|2-zixLB+70!G>$X?MC16q+23t z#ppmBpWe2N0RmMPXc-C`u2~F(FcnNL!tcNBUootG(}IFlBV#J=uIJwA>R<7o>GsGv zn3FG+Db^`c8ygbX)5oK00_v@cRi&8o%D+sqHJ%T$0b_?z*h&}R_br5#%WQ%E5Z`z- zBr>3ecxIluI)05Zc0Z6K3YTv3d6{g^R>`!JhZ3-_?lY`sC;nn{TsA7D=>bu!LAo`? zV|TK8{2H%LN1}o-_yD!SubKGVEN34T$8m#18f`h~k!rga>vY5Fbjf16OZfxe!BqlK zs1^)euW{EmP#KL%!UXN8Sx~@8?vaf(H@oX%MDIu$t2PkUkR8ZQ9Cyvz?OgvHo?i|< zl|9(lL4p}K?0~s|EbHr;uSLrp8{Vi9FWz)mL|A`jP}U6d1@i@hI%@PMmeBg`9RE%m!JxcElu1X)d%mz9#awD z5C1?^n<$`aNA2PQn*miwc2}DvLdDgKCcZr~ddYBoh27i}=$bdd@z(G=(dBh8OP(1& z!}1-1C1vWX?;YH1s&NF&8F5^5S$9p`)3Bo~(7xKRv}2jR^thYWkz^osDARNML4LMI z&3bb|wB*&rQ|FLy=)Utg0C4jP+M&m4`!vy6A&LC%2Cj z^3NHO_Iz9EC@Q>PQ=8N2o;S@QapB=7NQ1TqFUpt~Lt7)RDk4)amB;(8k{wG*QkE4; z9(7s?pg_cgW8cg0ycRJBJ~V#TvrF8yguBJa;AeF2JCvva<*D_^?wG90J<`K^?KDn- z=S*f`BLBFnA}Gh5D(gl<C7X{W=p$BU+>-{UYDUps=5TXQRdVFRqRy{V68 zysPBf2AVUDSbgP{`Plim$uZCQ6v1A@;c=?rVQep3D@P}8v5Z1R#W$u1o_~JNo9!nX z9_MyXSqL771a{A%rl%S(t22ztUI+#9>2OCxkrC}-YkY~fHaP24zB%vuJUmi?JtdIp ziVByejxgJZe&T3GKM93D{I$=yF65p5Cv5b#X49=WueRiFRraPHi#(#md{DkCbIkL@ zeK6B4D$cPW_Ci#2OH?ZFsfElTFU6km&E=&NjwUlzabChE5p!d|kZ;V>N#3GHo6Fe+pL51FwB0Io8djXw+WhG<17Y%@uWRCG zHKp3j_MkQZTov_B{bq(T-rt-#sw17Zgp97EUW@ZKL`FyDK2x)55YWA=07#5pe1Aq%*{AktMPfOhc-2~lWvJ@)M)h*ulZIFzYv&lrM 
zv5e=_qllvmrCHrcev*OXi|V09MmN_m9if|2hXh6<@VG$*2RR>V@}kS+9K1jl$a{U-qyv$QQ8el)sqWr+{zxB*nlhx}l5D;y?}dU& zOFUE~_UetCunfu})W1^C0S-&2oY{refA<=%YU`u#DC6VxVP}Jm`{|jNW0`mxpIaBm z;?Py<&p;blX$-WHM>&QHH{w=RJ{>ZU;mu`Cbb^*qY#k_wg8ebH9NzFy`rgkT-|vx? z{3*}RVfP$CC)?F3gn7-`!pqaNM$K?OD-+iBX8O$7Rdr7Hm_P!yk5--^V{Ageg#_4X zmbcf2naozbF)yIHg&s^!IKeiC%XE%KWe&oRsc`(Ta7Q?0%wQ!m7=E+cG6UY0a|%D{ z_a*9xHj%fw7Www|J+0gu!*~KS;6RrXW=%A132(YpbkPa z!rqYNUQ5G-vsP0w8xD>-CW4^dUDRyT6wT+;IQ`kW2fl9`2YsoHdGIGmJvSQWoqjbMiM8p*ck70J-)$xOrl6Z1@)TMR%SnBFJyN4?|EGzJ}PZ3IHFY>7*Ce1ssLT9e{}d z(uL$Ph((~ScfGfZ70Ih+j*>z+kMG?bO*4rgqqsO*$!8J$gn0%?lP&}8eoh|UE(Hw} zl)#$~qO0T$mh1Kh1<08!!#@lgxByT#`3G~#*o zpm>L@97ZS8+Zjn{tky}`6gQ07cOh|*k?U$M90PI$;BqAJ^7A&m#+n2|Wc_&By28L5 zv;sA>ijOtCim^aXyeLqv92YcQ{2C3?MX(gZ`lAI*)f}=q_TZPZH`NvEsz*=5%i(;C zrvBb5i|prCguc&blYW{*^PbY@c4Dzer8jDsnX+c_-sU;Kj8>PB;hLSkEQU#AVLvB2 zWpECSYrcNkm!)vWKz;hj9glHO_l36BAV*3y7Z+9eHoEwULuv<6VBmNZDni0YV4chR zR;f&MO!okVOOB;-5DUrHOrME8;|q zvB8|Ta439+>;LnCEJEk)h-z9Q}d|Bas2VU$i zAXa~lb=XDYuD-41)m%z6g$dubHo()2zU8h-6+jEn`y()YMvaqvnEQwO%}Kyv1k;mF zMdfCZJcYkvRqF7*ym!RwV~~hJ(;BaeyKN#X)zgzI zNp;%;YvP3WmVhZf&(NIGyxA<5VY{Dy7<>(})byV|9#u5;kBk(A+VuxNcT6AMJESYdJs=<@jF7{8%ykz~T3u*MzQ z*T-pHnrF5(?}t!fT1uwH(oNw`%&z*;3Pn%`8-B$RrzpF9nGbh>$SL{bVW2fB&@%S@ z0Gqu^s!QfK7|==@)eLiQhl2~&pmn4|6mMraNWPN`Cv_wEOh7ikoIB!hu>scxC5elhW=a{pcD zj}?rHmA9;k)n;QT)%K_|xQ#2g79dwrs`?FU#W6TD6+Ss2oE^eZYgh3#zb6R^q1C2J zQEsb}=$LCy>6^0+^&tL*cQ} z`#|58RC>?Nn(r3kVw0`YY(~lz=%+|A=8(ZDkrZvZ$wgXMn=F0~Fg_4Wil%yh?z1B7b*j{?`1fjM>_$@1_hpqj zNtw|Xii(bU>-93}7~&Zyx#bATFNYSJJbw|FkB0<7G;@>9NrON2y3u0egDR-Dy^Aw} zZm%Qw`FbWY$``?cPCX%7@)9qt%c8mVfVFPmz~t2va+cV{sHy86(%@R-`mU)PUt-g82Lhcbh%Q zCptN%=6KiLK?Av5{rqAaO7TTQGJMo>CQO9W!;Tbezrh0 zYo-S(xqDK<-BGd0tC8~LJS z53O0()aSc=rt?0PhUJM)W%kBi-GEUr0^@HQ-WVAqBE3%;Bm|Vf!oOv91*FYQGj<*% zv`wQ{QWCae?_vn9!e*0EDNrxW=Z)Rz65(S+E2>fZ5iLO5I?!5Cb|B=p$F$=>N&>Ak z+~^PSl=I|Jhm%%q4~|Nq*j{$eQFuQ`2zy!+`P%L01MKi;9yb4i30W|6vVYMbYGS*t 
zA5onQTEQRpQk)4OE(Ut@HN2w~8wSZavyBms9w=rnLB%9zoJFIi7x<6;RP@Q83Dp^0E7R6So-NA+2la6y94`;ipbK;*%lQr?1xJsw}_| zVE_z~jf0vmMvDDTGX5viB=J4QkdjoniZ2pZeMB@WA~-^F21(IWL|fiS|C!$CXkM*V zLw)5nyq&AL{}nY{HSY&7`m;vu0AlfHI(GNNjwGVa)jqZI!MGt3BauY+JLPB-A5NO- z2=_J2Oo+J1SFpCF=ov@^7(xaJkGpe$6CzTFiS){wk3tH3othbr z#%KvfDrWM49;-$Vj7_vvUkZE-POe)>C0hXPj5Bk+Bf@Wm8oNmFya`t-D^*N|bM0u+ zZOc1j1*0t9`9S<$GOQEG=1+z0o1JQaKv^Nt5o>`e=@~Cp8g=!oM?m}%L5r9?{o3NA2*=qRGNj>0>6O8_(lS->U?arKWGIp<{(S{nIeFFj z)YZHLvoDYr{6m#W*{&tYjWwxHy4Etm1sKNoV-m0NZB+DWjbUQqp2!%sw{I|}Wj^ZH z!?W|s1zy>3L!>brapSbOx;)w>T+8|61Br3B#iI39<~qn)$g&oG219oQ%9s+4 zamK^J>@#fz!L!%a z+Uo_*Gjuq}f=0cS0F?ObA@9|Z&4dHKu0T8PwBFOyN{lq<8f(~Mr|%ISdVHmDT1UY= z9B9eGh80L?WU#-)e^sMDpejhY$P)d&_c1)Qqs@ zzYW_HHw?31T2olrledid%*z6=wV--+vbZ>ld_BG7JK`_H0VmfrkU?IP%JT{}tT-D3Ux+I9 zHkl=`QUp#`d9me9yFeqznIV#SP@|aS(q2RJNF)3G>e2YdS-*7hAjx*7 zQ8Oqy>B4i~Ne*W$3_8w-TEg-yx_hR6Aj_@1C55);)un!wP>?vR!1E^&1ow*C+(1|K z4qd0ldUjP%Kf>dSgQj!oJM`>RvfOifjP?R9?`N`i=A{|N81As?(|4-nZexO{QJoSa zE;jL26S3W>cd6^~CiRMWk5*<{;m%6g7INRyLbKwhY6*Jh;RH)^Xs274u=1oYh54Vv zjR<^g;Uj|4({8&fVU`MKI4T@I0O^bXkVpX*kWHk>)Y?itYPU4XIY_|lI7E$L;UOr- z=hwph(35zh=x2S1fpE9uSz{&vmLE7GHVpq?l{()wssQNJ#$o$-jYfG=@PQ_?lp_rJ!RKg}#_s7bC2bd4 z#uN(2DP=VI9K|4ppVl%Xq+jauq@DbB1zLX*U>Lu}8-P?M!|!M#;Hwwq(VzGAN+aDS z*Id>vpmxhjqsfV``r!@k+E|G#hn7@`g!6188k> zF50P^FN1 zST^qr@)@l)t}n+ad^}=lPrD$MVN%bG^75PweazF$%)!HB5P>aRzZ+eV8Sz2g&}aM0 zh^6*u#)t@v*`S_a>%Mbp1A6ZbGD8I3O0(S5_Ml1Gt<{0Ea^T{Zk0y1JH(TJ)D&i+E zVr8A55hz)x!Z2Z1EX7jnrLD*=g#3>-+ZNOSN?gBTJp@Egn`^CFVVjmM+Wn?P?e61` zo-RO<)&jP{gt)_*>3Q$@a#v5rjiT#Qo|br>?UU|~*Bv6780%Ymnp-ZE2`Y2)KfBFl z+H)WZ-o#ab;KBkP7qpZQ_Oup`rJRE!Lj3qfkP4=|LC9p?BHoFh-BEb0yFXNt{_gdF zHlukyQpTC9po7olm6d8BMBFKppJA%6SAoR2!RTl+3t&U_zHCW|@U;9wzK*%9EdKDx4$mKz{X>q#Yup;AKBP)eT zT;*7;d^T7Gj=)1^E@tVdFS7_oGwnnEB`bW(fw@CFlDr|871Bpb-7YOmak)jW$J`9P z6o(?c5F~gmJ}}R3(P-YRG1j8mNm%~FaeqfoDu!z7ZWVKheJXh^dMUI%32Hz3)#eTE zUjBn_MFG9ImssLpE7PfTRx3QogJ7%LaM^A~$I<_{(#8OXUpghVv^^g)L%$!#hP5sn 
z78_CB`Z$p+j6DXJfF581{&6?7WLPPD+cG#nAluvUa6m~rq+r6{@RPT|7JozQ+g{l% z{*p+7ba(R1bnFxITkxYY>(s&dnpUuJzu`J$YkspkU#Fs?fPGDoZMBXGqVsC3Q^&#q zk{U_d@%tyy%xim=oKrI;EABNCarAG|egg;e;MR zUya=Kl~MNhEfUn@`QWUhkm5n5v)FM*%{zl+I6F-%t@WNG`~knnHO)88{Isv}->rNO z^ir?0X6s{(gVU{PVD-X@!~WGjKGb*krkw8V?^qfR98&J3k61BBBfOY>QEib2f$_}E3dvR<-hSor?fK?U9^c|f(d&-fC~}oXzE4_7s%ht1$~l9#zhDg9 zo;x@ zKA8c$Fy4x$8f6*>>+wVfR9NZ<&7Y>Gm8=dH|EN*MoXT%50O%)`pJRhv2t|DOHLhCc zOpoRZxgA{KP@Snp-d9m@#?xB!7|6cfN&T$*vJnk=rIxXT!%bL}RYJc;&ToJHYve4y zQFBmHSBN3N7dsfZ;&=TWEX;MNM1#d?U-;iGr^yF$jo|>!h`AQ!XJH#I#<5&}_ zw`b~!b=S=+x(#R97!5{2bEW-2mh3%($IXg(k~g>f$Ebk}_QIE%rtgP8$*2){3E88W z?NzvCo$lguS~q^-20s4WoG7WG-F#@jRe?l9Ky~3Gqz^X;-vnRV^VjG;mshHHj$xX* z`Lsq0^+{V(>U3D&IZCNP70NH8M1p3Y)#C8#!Jw-NyM3K#tGF_lCXLk9teP5GRV_EN zM&JmJ)#m;hSdO-g!*RUKrObp9>a%~&MVa>!cNF7=WNg8x07(3IoDfVk{TZiI^+9o^1&`eN z`fH0X+k_w=^R8Eb$^t{4#YzBl>9_&F|R zs?dAM23()u{9Q=rsHKupd2pYom^cinFtx(;?$)yh6aKPzk&%b_mo3}T-qeS1NsE}= zvjw_ccKEf5Zg=&|ETWq2<#{GSf#y@-3L)4)G0!9u^l|WlABYT{Iv`{Bl*`dbc6&y(WQ$0Xggzsj|sND9UqBo zl0tt@U?NBQeOqRS_n`Bja#1cw|#_dTxJwEO3xIf)BSL z$Q}&SPXh;Kl%%YPEaF)???|xD5XI2tty|9QFtKhhzd83^k>n2I6|ld>bcXkD?*-r! 
z{0xQB)RPUnCAU_Ugtz0Kcjp_*>A=3Z&}QZ|9ua3AH5dtDG*3rnAZ;`A)9g4OEFt+K!(KyIVxR2*cq?%+Kn2i`9nK(5TcwLrlBxOVXqz31No+PXS+ID zntZzxi*AHorwkF*YSzJ?GAC%wvyJ+zks+QTj+FaQ0Zffc7s42s496RW{?`u&G zkVO9|%716{ZU}L4GB3aEX0c*Am&8tsd3#o{6@HV(X?xEE@zM;i*865`wh7fl`X9WW zHj+68unQU+CTKu#gVN`m{jHxxyn+-XjY)e3ycJXPwe6r?H8-vyNaX7sf_G^;*j=F) zXyM7jL-r*292UDvDyVA3tR*@$u=*8|`T}pRQMXukhSA&}DYG&>RotjTc1S#m^qxGd z7DV2V@8z$w&3_Ot&>0{lMxanY!Zq3SQ;T^53wq@WK+sa4eKhgK#EI{2kT$SSlua8M zHlUPSTF_6G7^e~6#RWnTnxh8lfg;h?wL$3l0YW<(k2*Z0WAH6?f?SZ8uHV3i*W<^+ zB*qg0ukSPOPV9kuV2?@fY0y4SN`cw~hY*ywNN@jQ9r&XDkurtt_ywk_Ly`fL z7ZYbkF+^AAqw0x!;wJVDaMz9FR?i(wNw)n}Q<|qd8r~yFOcGZzwxHVjR2XNMP&eh} z)UN+u822N`zSK%D2RB9g*G`#)b48f1&=!Y!%OlJWQ<;MX_)M zWVz9~29=WCrb2?!qM;@Vyot|aUM;kxy^Lz~sDUnD=K(6<06l{rk-nlTUEqfhVhZpz zzk_@K$in_2TCMU255~;DG8g`l^}SIS=l)N|_J;9W{ZA_T%PT-bRrWt=KkpH%6F)=a{7&n;9p{!qJ z*sQ;QQFamnYAGqG8D@sw-S6L{#~e}w{f4@6Ml zXpW=Uk=0dlh+o{KAtQJ87BI_n8bAz;WJD9B1AEp{?`jyaXCap||^^bRVMhUmRl$yDu7uk{%Ql)Z21(;IX0ZgfV zEZX>a>8-+mn-^N=%0(=DHfyCoxIUl-aP^hK2=P$u;85v2!HiKdbokAakUn4>utRj` zfendP&pr9v?_eNn=F<=lJ!$TEV-GODM~(;Dke&B|G1*sdKBf7sDG#hzZI^V0Oy(x} z#Wr>HG63N2hqaS!vjyA=1eYY>YH>)u>7s|81^~2+Eq;-H{zxKVeQKdCO9U$d%2KTI z;(8cB*4Scnld=+T-Ns(BZk26&xmm38(VecUc*>?|zm0+24iB;6v!}P-s>E^9Lj+wM z+|sPpDruA0@K2{_E}24Ra0APR;AN0?+b12*Tiopko;)xZq3O9s_l)Esnd@_crx~QO z`N-eoZ#Mvf*^gv$jYDGRI@ru)J)@8L_falnvqswL2cA51s`-!FFi zqzzEO{pVolgk zqh_c~F+KLe>b!+By{2wBd|XtN0A^H~2-t^{XlOm44A2SslFeGrPDE+=?frS=YZiwM z@}S0L;pN@@!zqdD3zH+Xfz)#*hpqL$9gbS4Y;Z@XEq`Df78^bz@;ey^vD*!zR>Jj& z7psr%4c!;iX;j)rjmsnh*oR)L_QZ(7kLP? 
zur*o9oY5*EM#Qo@g5o7+bVJtIPYenaU4mZ}hAq_o@mkD(z81c1CP>~P*n5pStqlX4 z)Q?>{eQ5b!!P@JF$ml=r%l_y4WQ=4SrXOK$0`tI@GbBZBucE=J_Fn~V`KEW5Vs$zP6j7LAi9_A%#n5n%0Yh~+kJ`ZkgdEhfdcBB!bJ`B0H50$lMl`C7uJMIk9A+C6Bp~8O zW}oa|$s3(@vR)uKnO7R3`A_(#xEFBCq_*0SGl9G@zy0(il6Tv*0o1cY_bB59Fzo3S z$>xnv;DV>bn!1eU5W4nL9({zxmlc}3LWTML)kecB(C7H;Cb-N;1&IC8e5a2Rz6 zw5-IwnTg2A0YH~v{s>EAYOaaEgv9qvVp7&{mA8I~ylKbr^(o%cs)bZBDChC~z1A$y z8P8V$u(fT-lo}E2SpUKrwbB8m(JSH^eSM!Wre*vZUg%)HARAvRQKhpwGx@~a8-L4q^GKv70A^ijV0dv_dMmL4?e#J9<5>U;aTUM z3Bj8$8_PJ{#-BgERm{my6rCe%R{NM1)4Jm7`UwZy2$B+O@T=AK=%Yu(wcyTh{j?IR z)hKc~03_GW8oZzzHEe;`yX(I$!s*glQ5{+Nm=rcK0Kb4Ia+TRS1xHGgJaUda2ZnPW z6+qh9q_J4T(i@UNx#U!S1BTHv{awjZg^PBc`-1GPgQDGq91baSG;|;X z1|Yrv<`;m;Sc>n5Vv_HiKWtD=*GwpuZk5+FNB(5qosdG;csgCx;$H@f83#ZZhLbVI(jz8OPZ4uiu0-c~A0cW9{}wn_PB!C zcTBZP+YRV3OCgo$#(=>JgzMJFPKfa~Bn;v&8pu$BQ&zp)w669BK@{)F;=_%@C+PzH z-Ugr}S~1W3GKq<&vAuBaA-l}B95JVzK#Z;dg0V z-H;bFYfb=F;D_g+J*?uca`%RDDBkkfes83bmwR7%Z`#PVnxL9|Ol0t*+Nzd+-DU$N z*#1A_Cy?y)X+Pqwo1-@yWZ1^tV78hJ4wv-Nya62WehDx-oSb=_KAn#Leh$Q=1^i_L|QE&Tw07?^tb%+a)l*T z0Rm2$6zHciJOUqR@J|X=8>BIm$?~HCi_UwHknSISP}aX_`}9)g18N!&2m3RNGuA63XdUqfY?kUfka@5>KLQA`%`y*hkONKvcZB zl~K~iu+MBb!E}@S_yibsxsKgY)`zZV@Q1z2o@oVKr}P{$m_y#SgU~ff6m>)^5lyB~ zpVhoM$kqsgYEFX4jhGcAF1APYVM#dKuQK-$uJWwH8Gs^WTC}9Q{fMvoKPt-*rIm(} zn6kGwOxg*I9BIH>wP0Y5M(>Nl<82}470OZm#Z6M(Df6~QP6k&q8yO=C>gA@iD1iRz zeLW`xNM1d4=QBrP^LNhAK6!l}n5nZHuK5qAS_NfbSGZEy^kd+Y4R1a_>7%)vYU-?d zo;h2!^^Ui>q{aq7Yn2yLG57KWV^D*UlkXDv4hBi{I`Qbr{1c2RFBSM63T@@5;yGz$ z$IQmH+(vg=rY9b(Q*Pa2T09<#OBTOsLD*uT=ezb_cUjW6WN&A9Z>RBDaM;3|L0)}+ zNn{oPp1N5*vo?g`N>cdTA6TkVEKdF-`^L8L&sy+UA}-4CWo(Z|5V=Mlpdyo{@PkD` zeb>{@>?~mtlq&|T;Hq%=;<*q~|H>~%^F~TXGrdP+{ky>A|EBIS^V5fFZ|-|GAvr-c z9#o56Xl;O+BzzNR@E!?5O72MmP{YxR6_C2YARKzl$1>^I6W&LeAe8$WSSuu&%9@vX-ikq!MB)72wvs_dk=&~i2 zn}hh+vc8D&x%cGbq+j<3xvAq6M)gjzb)5G$yf5e^noOM`hhGqVxMnv0^zo<=U`}PB z1qeT_xcU%f_Q=zH&Qb-on!74 za>}|BUVA>T0Sho!X8c-^>CGniD<}3&X8iQ3C^BFh;-JA(7jDsY-M>4UbDj1;oY1U|HEotCp6Ekx~T~N3~XM7wRTT&zc-g{m^(ubV?5~ 
z0!4+4hb-YnvS6;>o`W+$GBIiN!Z;TK^Jm`h3U~ghSB>Bn!rde0aDzK?^Of-k_E=(j zEtfjBJud4kK<53&)J?Z|CSZr92@jHyaJarH?# z&no@&Hoe?0>YqvgbfYp)#2heGji#~>N#$XQzef8<`31ISGAcoDiJKHV(&t_!OVFM) zvgCznBYkd!q9CyE#GFDZqj%M3l6Qt#MVP_ggPLm}>$D2n=5`FfTf+YbNUTK$oL7l*~wv(DcfylOZ_2fLf@hb$W$ z`_J~{OG^m5{Ci9IXSwkeUVQaOOZkULzT%TVpF6$vGH^EhmwNDPx4H%RXxEo+_0L9w zd#5W)NXsgSosBc8vyK@rOMEva`tZhotUH(uK#^(xc0Q*zk|P z7pH$|oo}R&JP~|X6D~1zQ3<|R;})YcT}3qV_RT-Ed$08KjUtyz{35yI6#lp;V_>oC z;yxU!atGp>wQJ5b??$wBw5dnp)S;A|)L0ghLQ>fb^i*HEIgvQIF?>aY`{}Js*>f^& z>g8ju+REgY_ms@!3YFla-jc|tfXEL0-b{*c=t{5%69N2Z<0~8rpQbz0Q!~8YV#jgk zPWz^hEpLDFhv?x`&iS#w`QdK-ANIaIp6UMme@H5oM5r9PQ;tPC$Z=CCMUE+QzNv%= z!yGo#>5j;fW6q_a#8S?uQL+%rapkmf8rH0BnAv8(_jI`L?$77`e7@h`pTEcNuO1I= z@Avh(Uf1DyJ+JHaTJZ{tzyhkagR!d2kOFzP*Cl?gha6?2Nl%*%?D95UDyqSX)8eDx3)o;Pt~I~yFK&2vof z`dvZpw@P;ESVsS{{et>vc_z4gUc=Y3E`?g_U?SP)0++H|cyqr9@gA5suisBGz0%%* zz{!dc=bef357Dfg`sJ}a%Bp^|AA1o&0_4V-GI_HaH;Fpr+5x9@2Y^3 z!XEin$EmD`=k0~LR;i3u?;uRZHZKL{D+8_8v-Y7hL=IC1twVFK*DY>1C%VCoIz1?U z{2qRR8#vF6RoHAte#+iW>cBsWcX%3u)gF*OIMYzv9{=j?ODb_|9c&@=vTrEOp)=wT zau`RAyQ8sU(BF0$xTr*1k55WDk%9a0s`17T50{vSW!2PN63E=8_X!hx2jV3+Q-1@T zPpb2E2wKLffO@YV--F#dmJA#yVe64!l;}~&oUV^9u{+ooD=c`wgB`MYUHS{`ii^t2 ziFW!-%of+xx6&Wp8a*bdvEZHI{czIzph;)WprdPS$ox^f#$+suO!n2awhb=@jI^%m zY-NC}Rfc+@Q-|xI{|xQx&aRaGyu!;&w9927eoeGet~rYHAVaWoEw0ya#lfRt!Y7_Z zzeVgSF{C*aRBicv>x0cP;=!PT826(g7AB;~hULdW^i+#QY0UA0u)6+r+q=5t=(KlU7$hGM%`fW_PIqSyD$L}J3E0EgScJh`zRb9~_92iOp>zniuzFssj zOW4A5*?ahosOGv=r6Xz7)}mW?xFHa)TL}$_zC@C6k=-3RyLME94ra)qcy?@v`mjQs zc?8%rY07~C*cEVCd73zT^!T-B@)BCU;j9M@fe{$%BNJ(3zuWf4gy6Kxf?Xp%a1%*v zio@@_D=P7#Ly+%&?bq@1mKOn182!bXukOapwkL{v<5`=sJ_EN3ls^~G8fGn>#lbWb zV>fB)z4zF4ywh=n@#qFTJov)Vy|&|*QxHeSpMqXcr%)G;>`Lv@da8Dq(D2sAHG*D;&|GNm(_0|l3B$N- z_w^~vO9@oK_J|thT{kt9_r8|bQS0jJd(2RNhFm@57Zh7oH1TDru93^9i}|JBxo_9d zAp5^vLlXlook|=q2a(i-qov>c(a);?ATHGd9XW<)R?UEND2uat1Yxd#SR3X2|QsADzBvgvESiPd2UVVfo46#>qFV5hA$K3klfos`qfi6Lhaqx?h z<`-Pc!fXcaw`0{*IH5r^o}SAqVjcG+CfXEkH9WSvKjIMa?m9I)diAcngdJRMr_wJ& 
z1@lL!n0Iyq>3DD};$9`FoqKq&GflaFOM9EUy?Xx_ChIa$9%_&@<`rrXm*P07Fk2MK z5hUk+gAu?LmL{zhv*2|T-VSob33lM}y1d>Gu1-WkDApQv2 z0HeEJBtCSRjNSNkY2Si0qhrhV6kzR&ic5R58hRB5z=Vb(3$~t~Vu9SA!qa$@Gjonk z7c5OLJtxhsyJ|>4TMa!Vg}e zZdXr$*-K^tPVllJ)Aja;zxyW$q*EB|pb)+zd?-ZyYThCJou6hZth!Duf9%?uR?@Gl zZ&)x9{>a&mNL{Ks>qT^|pRyV3P3`H2nb$?0&8(_Gx({4-oY7vG&vGwA9(AS`I`{Q0 zM<|cH1Htfg0=iZ5Y5Kfmz zbBS)p{{oT#h+;4P7hZ?|DC58IoC2VzE&Pta4;%*q{q5^jaaR#3mDv+7ckVoV3ZSDi zly=hZF@U3_u;psha(m9=t>v2NXy8IsI-dK77*~YGD<{$&k%P5}^yp}VLimNzb9D=Q zXsJnEEZ|P7u_OS1yv=L7x>P}=&kge4pj;xHjg5i(5m@}wlT;pd0nd5KI2N1-dv!_{(5azs(zLW9z5dZMB$6a8>(ZD%-0ITgpsaxB&>NU=Bw$5Zu zXL)`U6PVFIZ#=GK4Rq>p+W;Uc4xAALkg&15>++%kqB7p*^J(@VqPj0N+DQ+10Np+h zX#E4-A{1YZW9_c&kC;AdwaivZgYo#eN5KD~4)Nau9ESzo9c|D6=SpMGi>k?6AOYkn zUY-1}5*;`%7Me1wepWC(ANYFZX5eFC&j5M&nlu5%5yk_Q$O6EfG#>%6{@p7&FBk%7 zUN-{}XvmcRnbH$SCKj8ixd}YdRqjl=SE}idvQ9sZuQ5V?>ykuH2Hw(moX&qmSY~TD z0Qdv7|0-J>tvZ%?oYzQ2MquN43wStf;NW@WD3rm9iFN|7Xz>6G`5b5+U8!8ou?HUV zU3a^OwQew+Ja)`8Kem_*UQsTK+1KxGYKA&PmAHoq_7Vd_%1e&K)S* zyYjIRz7FX+iP$lKF85^Qtol=DZM5@C&IfZ4HU)Cw@}`}xIj#HE3daMN*rd&5whx3| z&Sfu^rH_m(>h)((I!M=gCz=VRUp#UYB^bzgGb(=e#!$sjC2~Tu4(6MMSVm9W4fM_+ z-3JDK9D!3oG4l%u=K}+H!|Dxr6tJg4=U(+h-_DE8Oj98UG!e8=Y zfZaUxQ{C^l8r!nI=u%L;E|_Z_SS?=G!3BZOVOfHCbAypRw_V>!_qaPFbRSG{mdA0B zv%DVKoIdXsfhqSTual9fJYq*kq_mD2Jv;0I93v%^76ah?5u?6SA*SLu+eG+rc4)l@ z27~kqdvMvV41-lp(YL#t)_&PBkj<_!5l(jaGSyxJ;9c4Fc#j?6*IgkbIBpjBXI!hV zQgJY|ha3^k20Fewzir+G^Zju1`33#wJZ0eiy!@YgabU?gs3dK3^{FFhsS~5*0daXi zx{Y7KmXv6e7uPV@VcAi1za|J(R7fPL)snI-Z^@LpFTby@#Nh_6H)eL^-8}BjGYi+o5XKMV4$DanS3{)-5 z^j!ojGa=&yVA=9MlD6@dT<3#&6%6)BX;w9GJwR`avx&>NPdOuEE+TO6uDE^(p<$`P z#XqKVv8zmWt+?D`p7jphwnwO-G#}t7SHKs#_x2@@^=X;Co=G*_JZ5vayW~k<26~+u z?IS5-$+z#(UT*o@A@H*|82+Z(5nKm{AQ9pT?CAd})Uw7lxoK$LDsMn(Tek@9EWekS zHS_Uz`8*Z(^M=8aM=8+?_2&*8Ubp+eK*OHJ@u~D|6{GQR@{1Gf(X@%XcaE2TVDMhT zDz$2_zz{@|^Pu$Ev#*=ZxejmM@nD^mYn0XI%DbOsoQ@)*bV8g@HJ zrsKPJ{gkZk{X~b!HMJ_MzofV9Hk^%-3-{jC@*-g0)4h*w_*4#ok>Tt2-`CKY?qq5DIe{&-tm9B> 
zQ;auh-|EwJR_w*i?H_T^5F{?Bk!xfm3~Ct%&l($DWZ1~=0yPfr%oTLAU1i%zYPfum zIaO%W_%O-k%V2m0CsZT0N${%l4N0@n2G04B+kHw)#YqrkNW9FriX3rZ&J?vPR#N7H zI_4eK`AM?tsrD$->D}~o{!?r;C;^*@{~eBexB2RrM<;F-h$$+t1tfE3s>sEBPe#h< z4gt`t6!4e4owK5BnOufdx$2mvJ*gpIInVUwcb%&$Z=1 z7^3e;N%6axjLLlFbE^HJ@dW5Urro1nt;#7&d|)#r{Yl;)P}WkTQ3)Dn!m-tE+h%6 zmE*1?%M&NovIevdStuj@pb}i0HrMGaC6Y>3Z0#w{p6WYCVipA#wvjLsida*^DfqMV zF4A6uiyO0KgNm|_kra{0jlkmSe*@62N1y_1XYPwI)s34$pZvtF1oo*3s*&0eT{4oY z<_2tQC-hpDj+V>1!jlbSJFk-Pqc;)L!lUWYb~}`a$q7BJ6{~FP?v8U~=#M?#UAxs? zeQ(ib3EI=cOd`UU=j24rdWYR{ccIhn#G4T@goTyNr@dD{ZsUK1SXo ze^muE^rVq+BX^F3T)gUs!gta%dDB?%DIt*m)*)2Zn!+nCcW$f#RXMJ*1;yQoYGauq zxe@Mk_jZN*5G%l(Y57wiC-E7{XV!t9E44ZAxbdBdLOm+D~j2KRU@NM3R= zhKh|L>8`#X==PT6?A3zzAz85>=-<{tEoVlpi(*qY0Q;_dE%A$E1LZgY!nuLb{U`;U_@ejQg%^x3=6Vgo+ClVjVkafbGP4PK6+?i>r7AR3j_|ZW+Ea}`{ ze+h;JBoHVC3}E4e z;}*(1!-6fS^GG|ed)OpY=Eeb*YzkcqX0fIaR7G6w!We{YHfn8PM)1gFn8A{d@}M9{ z!({69ip6J0q>0=B`Bws;mDwVY%;;NslpekCxJoI1Ob)bEE2t5$(xs!Y=FcNDRyQQhW_K`7h zaW659`v3)BzMv0koLIx{FN(R?69^S#?2!74gU%n>ZTTn`%vgexjGS_)f^T!Y-1Q3{ z#(X`*b@eUsJvi;6caE{lAZf`J;}a?8^!iT|pVW&!ZYFv8<@oeCGPt?}u<7AF3HXmg zx?MGCE!|;K#ja|W`XpJk;%exNq}8AyQ3$i8GHaFfqf|~R#^B>>kpFpEu{8b;0~-4v z{4G=fccO;R-BwsiOx$q?F<6P6TasSx9JH(u069tFgp>r;GNb|T4mkoCdLi&NP?QUT z0OK~E+IKvg-9ZeH@NXpoH~am=N2k#Y-LydH?Y3i@hAa)xC%TUOK5?s$bwC(-F8VEK z0JX`SDfK~HQxb13Ow1WOsZ4~^odLxK7`X9$9PoEH>2D4MRJcTS=g(_KDC&elj@< zXkFu*c>^__|0?WP*hnNd-ULY*hXN}4F5bsOxpW@LMAuwNim%Wb@`5IvS`bgy0HOeG|u8`W?jPMm-HY)ypL zi8bt6eoi>7{QRQP@mtFmU6-B>-eSagg2$Gcqz%Oy-DIs=bA%evY_jI!B$>C!!UIAr z){r8(J*!X89H1Jy%^a6kH2C^Xq(mW{b_{rI*l( zr-wYOWAnoCt)R_~I^Q=W^VpFs%#3+(!VsomS3um?&z8u=3!t_9ryV$aB)J~9#sbN= zV6SaftOp8_T=Veo$4BH33d-<}4@lD2q4k_Yn5t=gvh?L@SHvlU6Ymu_&~y3PY?Kqj zE_`;zt@&=&=0a+b8v9X8f&kLsyWuA%>|6qDME?DFJa1~OHeNh) zfp0s2sz&ERhonK@{fxA97sBcozEl=XD$#=w--y|LmgB<43*bh3rmLt7t>t-*{WnY6b zMyaa2FlQfdNp)aj=$b;eyZv06ye{2}LjJ!#-+v?{tD=<@G)t?NB2SukBHzDM8lm1yI| zSb;6(dPtTX^7yl|t39N=zO8K-Mh+plnMzPOnJl!%huQN|#C5K2FwA`+ErcSIuA2YcvO3d4vPb#t=XvFC6w0DLOYC7in&?OtAKE(`({xiqO4Uw{SHlJ 
zhq@nR+bN0Bnw}l2?Pl$U10F}2x)eYQ88h=>tl6NP`TD}QsAW!(MS};E9u7FhWic21GsF>>9I{{fz^a;t%DKq2@@w(X25RkD_uS8!6nGt);lYyCp&Wv3 z34@fA-C~5KwL)$6ac1P>xV$mfn(P;ux1oY+BZU6CF}{^zBZJdbKEZWhJGcBK(r$1{sEBnjyi*mDBO!-RhZQf@v4}L;32OQR+IKVr zpBK8pF$y{cTf$|JW~a$guDf*8oI z)%tXgl{Pl4PSe>vYc~xZdw|zDRS=~7M)z8LIgzkj?n0W$q#SI@LD}&lKa15(JzhVm zYi0DCUjY`Uk9xsD6y$M9+;;)h-ugV}3E&{kh~;#Rx*SSHtbDACq$fht3;OD-kvqCS zCMc4y>XAQ?!=L{_zI|@Roy%LbK`Jqc{plPH;SDajA7*L|hlj!auSwt?cfwuV!RMLpcPg+07&*B`|V5 zP%Kqp)yXzXTpnj~%=*E*a&rgi${yy~6sW)!Vb{?x8V&ndnrOBDf`CyB8D-eA=3!(F zKdqUI;--_8Ns>(p>+v$hB!*q zN!!FMqLTGT3a%?@aqF<~jgx|MyGd}QxzX+hLQ7bD)Ah8&G~H;DYuFvq;~<#zMwM}2 zdgNqtPhuS;mCgfcg)q|B_cR%3TG7i#w5k7o(!RA zj{GJ;H7yj@^X_Cd+z>KDak>LoFJ)K72;7sgk0u$B29NAXy#=-lYc4C(NU9` z@2E&zyIDxeVvrMdCDy%x)TSzO!kggMps4C6%|zqPNa_j-8bAVm_F3=YS!A6_ry`Jy zN%Zk5r96+uBRL(ywin1ZgME*Vj5gOP4I(P9feAP3$Tf?jb26KvVam<5M@BD1ZrqVs z6y)dbz_N&|DzQC`mpisT{xVYc`Da|�wSS)|7FExRBIkDJj#zE$L zEPbgj+F{Sep^^&LsS4tgJ5GbaAe8{QmpGZJ5;k!GI4hZCQmp)k=)_#U+pW&0-$TI!np^!W>u|MaOb3$|l9eaN zYB(P>pl-R^y7EGh-Si@U7}cP>an(uHTpQ6%<8CIL(_VVqw*qkv z>F2!^%JrR1!@e%CWY5FvGD{>7QbI~BYq!l*3$+Pnf^jFz0AKFWL=(4a+E!xXFprBE zH`r_?;J?xG64@fEn}m*`NtsH}6R4-(hxd6txFJl_a!A*K$rVz(spm^=K%Zp+N9s`% zPM@Uf!`7n_9klk@WEj!eX2A0l1-{sMU#3@eLfOyN=Z38P9avpxhRyR+qt0sOh>JBI z7pLt(jZUsWg7+Wb7bq)PG+?u6W7_OOf@^?{9_D2?TfNS?SU%8j3q)Tz*TzA*r^v9z zG?7v5*PDYL15|*qj@oItL|7OzIrj!+DupL@>#7Lr)7hth-;gjhWk=_zf1Tu{(m(m2 zB`+s-59@AR>ZR{(FjJDZ>m+7o%4-Pm-C`;Cn_Ebn1f{$i7Jd=36ItAS6Q1OH5L}St z_qfayrqwIB#>|v-Nwv6|)9GtBV3&_jmq{QhxT6s%z4T2GWOh?95lAu&=uHgHfh%W^_Nkbvij|;kP#IjcpC*=qxFur%I=O3#*{Wdj%Oem5WTBt*P1$Fhwd2ADoE~5-ZP+1P$pn zZX${+mV;j1x4Gx>bAh(uTD7xN@0oJZDwLNOG{iREj9xh3X1V_T1O=*x2BZ-)mAs+M zIzUl(6+5q*wi=W`P6P7BpFkL+ov_wTc4J|oMWOKaXLHNzEJq1~`?ajB!^iE4<>}hA zm(@{CLweN&6sL_mnN$%=Dz??wcykCK?Q*6y(m33`rtE##?KqqqFS`;WX9*uqB_@x- z%^KJyqYa}0tn~N6S(QyPE9pS-C&);XP{!i}W*>8^`Bp2mmSugx$BPabq1Z&kAV&Nw zRNs~+^*cq9D%2_J^w^K{^_-GTL@mHtTb<-b&1REbR3QOu2TF6x7@$qSl}lyE_+%ByS}a zfHmq&mrz5oZLaXLbMRhU)%RMS6eg(xx7GXK7o%@JW8z<@Z9$Rn0E>3ne 
z_foKiAAzfi4PYUti6l$`a9JwBZA3#pR`8G()qzB*x-k*B5}7prj7*0omFUN>#V)$j z$|nWaKWc@E1P2{Hw*G!|a6kmM&Hyr?9ti<2t2Q)8n!E5lRz@q8SWPiy-+|*yToj1< z{Jpl3-_@|AA*q6z*2U`As+LWvVB9Bu9MktQ9@Jm6xl&cmZVy$~>JILRm2l(c6&U$< z_1qRxTX(KGhLql|x!{p|?BEl*o^ImUh4L$F3!{}gcUx3x3QO!7;qGAF`oi+NXOrvE zIY8C)CDqMvR6mHL2qdPTkXk@(sQjqhF5SWd1r?}j?u5?{41v~6KE>n|)&dY?eabp`~-yR3Aeij_Ia!>vmpCbkabN zwNX)B#AFN3vocN4TPM$z8f@@KWua;5E+B&vQd_FME+4txCO?KQD;Z;()9iNJ>u62c zaDUVtrOvUNl~j5AbxQVd=vU)0~T9r~&&qK_vs%j{TnXC4A zPcp*=9410w`V;23ZH#kzNHbF7;8aAIGbg@X6QhsQWp7gO={z<$c7Dkv1%rjgtIw)q zUS9;N!$S^Cer8Tl+9&`_Q=BW5e`2U{Dsj^BhU%lB@N&)~I!2)Z^P?6R?L#uT{BHRsvCGCP2CTjTp(y2<0;){_WX6*b_e51^yrOo!|K`C;%aVkEKv_ z|C-tXt^y!I`F#ufyx<==6a;en`2QdF9d`XcsWX}O4CH**pg_{@CBG4Qkj7d7a&^_L zutNY>1_C|5@(cf@m@61J5d%Teclq1oQ70(^pYCr0z-cl2JB|&Joe-|jtg_eHX^14cXFpCZHkNJhL1DNBB5kDzQfSHgN*y45? zyNIo$EMX8mM4h$yQwNIMi2#*t{L}OPiIVgP2MAxiwfSlN6I7wb-G9c4e{jL500bVz zdk^$7awez(jp+Zu7wTDd3c!Rl@N>(E6s!m+_0E6S)Y5aA{@X4ryrgniWbA@@_zxtdLVAJ?@98HIEeCP}*y)S+MY4mroZBvfpROZbL*?NPA}51EN4CU8|FZ0;{pPX|pr?^` ztm!>H^ZK%7+&0^f(6~*Vv>zY>71j1(E*c|qI&8d@>5BZ4<0>T6cpJjKMTXTeTQdZr zmU+lwD0X?$uiVM;H-zTsAbf1q@e}t(d8vXaUxKuHroa)HMaZOClwpzXENdbUtb)v(5RxM0-={LqRnUMsQ>C)5Z_ki(LFjiCCzv;R!zT z{iDYbD59ybI+V4~?cGDr#Q=-n%NBr6z|qSQKN;;+wk?z7s7?@QK=_yaose5&)Hn%838W5&>n`{5JrSww;Is7TqGd&g!r?B=8r#d5m7U(y(!AJt)_?G5Sy6 z$=h(#2Uwq#Hog6}mNu}nHUA4)kGXDD+0L4HEwh~>_`iDNl)6xGue6N#%GYY2|B^+t z0<^VVMS@m?-fOrS|J+tw*Xmb?=8~`J&4|WDhkQ<&wU6k@qtNeP@nA)na_0}fw58H?wr5~&VL}ess}NS!iI8_UFXXy@n%IvuNVs9 zr+VgQw>V3KrNf5TobZ*-r2bOCqhel`vh;EffTBVK6#q;U@!Fe89%9z_TD!U#mx)>y zMBT>6l&y=}WTl&LJt$0y0Gc9;yx1~hbtzw`{`|Jjz0xw0fR6&h}&ik)$FGbj_SREf@I z6{;qmXP#t`6dU5yudEu60wf^d9UrgY-C->W-a64`Y ziiaUuox%^TOAm!SLb7#^Q|vb0YX-&j^Ln={ARP48-5=+U8<`GS4!Mr_t?Fc7{-);N zoP2(U4W&-N=|8!ItnOVLY3F*m26p;pSHa+Bl=K%dgG22;DvoS&dfvY}+N_CdwI?eT z>!VaNdF6o)8Av#QIo}TXZ1L%6a3v(82zO}2F8BA@c+?e*`up|@Tx;)NEDEwCY!AL40j;zb>1m*NW4*u1~u1EToAw;t{v0&8xV;IaQ-8ycD|IeE0N%yp zMi3K3(mmKpi-Zi<9(ARv*~&q{_ 
zX@eY_bhD2RC;}kkgpE&xEjG2yg0a5MZkcvTk}n`_V|LvymKgi1CA#4|qeS%3Ck|UY zC9K{@x`R!5ze8;#a@(70cE#dB;d%`YdWWFY=&l-Q zOc6U>r)4<90}|S6%1|HweNcJs>1nj#{Xc*K3A~jz4nc`RM;%A<|mL;pUSO@WMtk1mITw$%ILJ1W|-lZ_MW;XKx;M%VV+{=`sC5)6z#~i{l zUuh1vTNc&K+KH4RZaB9sv9==`_q;3z&AMYoszdevdnNc|^@IYxZWeJ0*pEFZ?lSs^ zdE%!q=Slrlftz+}zqqfAW!yx{UI^o{&UK~Xx@*#0ERL~J64PxN*hE5dq;ppH|Een8jGAyrdB%S{_gcCHdPFDZAnmZ zTe40I zWbo>tlas6WthVi3d+#Jg+LLsrGO9&?TaX_-^Dxiz)ed!ww?5m&-Uo34G6rfmMSoP? z-R3K1?BOwMkkUU@(|Vt9!=PLrIs_D4O&h|er32A! zHnJxtqoQ0MCt(bpeE=z=~oa;R%!XnG8M*?p6G{@HayeRRhG zo|DlVwnlL*`-z;R>f5s0gQN!`0z$v>n}e1WE6qVZa2;cmmLtP1)WJi;AW)bEsxhpf zq4A_?u;?T?0F_Ld>DeQq>^-V&W+~eWxt#n#eu_#HK`lpgo_S}<$?o||WHg-k^wSAm zmH(vLza@&RWw@mS6*RHM={ZOCGJx@p(?_58DqZ(XSNRxlw7)M1j=7H2sQgRQ3Fc z{c%U~I7Rmcw3c2jo~-CzZoaYX;Bc<}s#nGUY%KW9x`C5k`+&e0QH9v(CB1JGf97WX z!nc_lJ_ZLXFkfJXgqS{^<@sXvFM#z^F|6MJnCFLI*>LzU5Xgp#yHTf^kGW6c`d4uX zk*;Mq13F0KyowbQ{ytO2XGL+%A3wn5hKf3)wrw-{%4lW`)1wN4L~y!(yd0JF&mYfC zMWIhoq5Tv&tFBL0$ebQCqc_v+1kZp^wUVn;v+#wI9}3aQC}mP*nK`8vZDdgqRB5%m z>{18$B|_)v`}W%lecqOXa^Q{Huu`iJ0q_tg&`u++(EvaTA^{b{zYjhxL95;O9j~;i zSiW^#?=&*_OK60blqexZQmd>_u z$}fk-HqUX+Q)EoBw$IQP%Q-0(PY{5mX@C3#Z#ly2E~PZFfr8}ZzoM<7*amvSo_TJs zSn>Q5vy|?3y4kg1oQy2N!=5KoFT_ZENPh?0bd+eVH=?oe#gDZd$H@tu)-4UgFky!J>e)?xR@YXnrS*X;SmSh^d=C-3Uh`7zg)7nXaX=BxUF%vfxjNwXFoVX z-$m36S0O@00oB~!!`m6}JT0Q;S;%c5vo(FU^uMm)&#vybqVU7Z1NV??^P5ahW!{X3 zWjtH^^fHLl%2koSp#n5G{}^w|?7kyZBD>Fin{gK3giJXGlgcBk-)IJWZ*_}qTAxMk z))agR%)^h;Cg?f(E%uRWmEBRSp|Zy*!~tzuRq#glk1fH#M19sIVvpRQ{NIdTk@OuD zXCo%ef@a08ICL%Bo?wX>L;E9LO<|#^I3Pbiat1gCU+G`T;+4G12bIjyU8{vS?b?WJ zBn25Cs%)Ac^QF2ek?5J=RcoN*ls*Uc9L^Tl@+3>t{3{7so0Q`abtEv#ZB`bCNbbIK zEjP1sNb4zlR8FBQg$^jY;_hrDaLMfI8{SO~97D(dU^#~Dwnpo3)w9uXA?4LCl-0MO z@#W)Is{ztm2~v7gJhk(~QS6R0?~NqJ25{9_m7dbe!~u&zLInf!*_%Yo3=X*O%2v&a zMF%fEtFVA)wxN``m6JI3gFzRP8A(G#b3e5xsedToUGYo%S=gD#a$tZ#u?^{qrG1#I zNAJ%VwDkEGx}~EYD9%leBd&>eX@G)((DpwicmUF*kYAl#mIk<%!CSvh#{QL0SOV@5 zR9t22U-4~>m7j!u@&SCh++{GfXQd8?v*CO3O$6lm>d5ecl5jrVRBYI|NCVaU6ZUo 
zFp+^}Ty^F(Yu3$PFx*$L(otIpPD^b@a`VC`jB7!?cLy}S{CkS6L~O1IQD3;d>fpLZ zQ%lR8sR(LEma4&ws89LI2s9t{{Mb#6YUf2h0hOr9daHn>T`+upp*Q5$sbR(3{Q$9* z`e7^Rc|2LjT!$>pX+(GFpUGb2T+J%G)mZ(lf%#YRDD{Iz-Ehw6DBsJ{AUBtl5$wM*XH)kFgohL88}MOYL$V)B z+OwV<28X|(T(%tnD}>3-{z!v2d4H=gKW)|%0?lru2!h`Etn`2Q_3R%F`>jj>`**gf z{R*(%f%=e|?Z;tHr>tEc>7N-57j~sD08unB`=4rmjfV$7pJv}{fEw%m#Xh>wWunyO zMrA+JL;`LE`NBog-a7X<(b|7E22UIgv?v>v!16vO;z?INq879&v-&&g4_`7jdP8#8 zsNalzeTfocxb+`$x?9Qb0{d1E2!qlHKdq``H#0emwQp7mu!$g5%B$*Y_hn96ZOShj z@rse(ns0J5`TDLJp%!-U(qfes*ZEqOLqrl5!>yyA#kdZkX3Ju5(%6DvR)XhJWd({D zOU~mMX600AKB>R0lx;Whn;&f4r#(!j_#4Q$N&eX&{}#$*t6!~tzxr3@PHtwm)xl+o ztpEx!wOeCIp1tMzt_RfFw=gZ zG{fr04f{0}z^<87ZDfBDzHZi1jy@anhTYs_oiTdQg-%zfMt~*ZUD$e!kt3ULY+QFT zbUPZjD%B~P-9@V|ZW}DDS4%b-LJb%SgWbVnW1W&yU-h_8kYp(3fIQBrb`$>Ho;mxBf08Qh@NL-+c#X{zzFq(5}J7dj(cVDZ&0K^ArM z2QlPp+u0nm`?U`@Ch6(X?4(%)*(o8uQlY=Rxw!zgG~+GdZWN2g78yW zxAXFxK~Z5P%8tNDWKaP@-iKcV1|&5Eb^!kK0k@a?)Q&tRc4-K&E_})zdl-8ANi4V-EZ!-(J239Ek zf8jd4w;)lIN;AVBY6SKUw{8}34$InRdFauA-)t(hn$`nqZ^|6NkVYHI#ST4PK6Dc~ zoGMb15PysqpnPobO)9o;@xh0I+f|A8u*&F-abd&(&*@BK(yHR1__CbthFvMQ=ueS^ zZn_bH^p+4Ee@sFo05Nh79v=wO$bl9EhZdT>7VXm299>bY+WNo;0BHGPaKm0$+=>?V z%vXj>u`iZ4yuYurRg#@E!)Vq=c_`?tmg_a4Ynv7=)P>3Aur0$f20ZLXBLN?(pD+MG z+AcJ2%w)Vq8+GlaJ@QT1Dv@lnYqWHGE@f^Kj_<0c{0lC!^ z?Pd~r(CFMc(>)VUB;8bk-F7X9E0Uh<+V}^f+VMru5PEL3X3;tMjO<+h_21C77biQb zMP6y$wXX=Pt;4#oLcOFxx$l0ha7|{^Jpqy?Rg5~(Sq)${9r&<9we!7DK@o#F!t+UBl&1W91Mr7lI^c>ZBAW*dxxgDEXChN`25z{L#h^eHB&w08=( zHXKc@)i(%DcKSo>{g^BM4Hx)Poa57(Z#cbzn|BBBzy&P*-Dt|=b5f)DH?V}u+r^qm zdZ)EENu#<*r$`@QSt`RaYl^t*&J0d{2;KnvWUKt*;=d>W4Sv%b_qM}2;{=ISON$n$ql?ekBHW5(vYBQtLwVm~S0&cbDt#(99i z_`GJ_r=f962Ra9v;T`1%OePKP{u4L)7+?<2-2tSQ%HHUf6YY10y5-hq$DwY#PmAC7 znIl^BVG4I}45ZFmw@dq#R{dgof&nOYL!_*I=b;6bj(a*w)^9`p$=SH*&ORsqi%<4= zV=ZXf)rp=osX?c6@+Wbfwwy>X$)?6oFS|$ftL{1l2#`>cs#i$xuB?TtE6krsF*R_N zS)z6|_EM=>7NM^h?C?a>W#DS2+nvM+v(8~uA&=DBGyn#s^cNX6u4hMe8o6()=iv_V~%#4XHC^o7x>(i_QQ;TB%7ziyXWC$ zMk6TTh=3Nwrt#=xq^k8`>h>X<*O}YhVg;lXh&D#KQdQA%X|-$fjrHu%*J{A+N6?F+ 
zZ8o=yB{xYRQ+i_Ie$eNyCnPbuN%c~>0k=!$$ZrQq6`ClFlK^-){w*7f8}Lyqjiuw@ zs_U32#=PsUKp4zA_N%!JXc?I%f?UL#zLf3apONmVHL%Sw*NH|05e%Ab3yMpYgOPDs9PGlKkL-yHRR`28oF8~q&hly1#){YkeLU)BK( z)T)@K7u{KU@>e`*sj93GJ{KcabxQozOhN*_5yV|fZ4cNXW$f`SUy!gj>I2{+SXUJ z&9Ml4PA-l7anTT+A+$FiUmoelmSV?bW-rG=hXsXFAwu~8398KYl2b*Yr zCyD+g;diOg3{+@FYGRuW!>HFBRIyNO%iB8VA!PBjIj!Vb_reW}rb1>^&smKGI2`R} zF$u>(rG%vNk9h1wIlj4e>75TnT5fmgdhYoVOWSbH^&5LwHL6@2bj#qLWQw?sC6IEh zCV%CI^5&Bd0Num^V&dXhx_wL7GjUdx4-6H7q&angEt{(ll$TwjPb zDqql0EMKu=nPHO#lA__#}CkN|AJ6m1ChSo$jkcZb>QEiu#ms-om zE5}3&DNE(b81HCFEa%EKF^0v=%&_bCTF3ic?|1e2{C>ahpYQ(YkJ_GlJRZ--eLWp{ zAukUv{ZDO$b4lW5JrNvyn~cNDV=ua2F_G1JL7=|*1pkat%U{M)AM-gG?3IRcX6dSr z%GU`oRA@g(eegG>W20b7$vw;J?J zO0=nHxroR)7WojS633`Q!~IC5crDckl3>c|UL zz6WvFX-f^@ykz(Re{H3sft*Nw`)bh3FU=iyF@fJ_BE>XMn0C6eD#hqseT^*o;?5=M zQvpk|`?GYHP6q*%5lYskrG&|9e}MXHkGzD{Xd=c=X~J|J4E6~OWi@PY+B$Gze@q2p=}@gyM4 zcrr~tDoz?{lQ3Q!b)^L@&xgZ`*I6#MKcT--w-mKxp5WcZ7G-^P*7)r^1aO++IR7^( z)^k4GilAcc^S%YD$1Z}3g*xCzLOq@IyRjIuXEHxa+bI=q>w{gg8gn^0*GGRE8gtdbWXjnJkqVH)oHGnV<6iUygBNi z97bP->*V;7^oU7Lx?KB2? 
z8^)PS2+_NeLf~8azji!%o5HyUC{zRMipNuB(8`3}H8Smn;g(mca;H^^FKVI16krR% zP)aU++A`!e^=7trGj`5jbITS*#{&k|ZlONIM?y9k6gnC7V&^UIwTHKsyzc6**#FF~ z7gM4+2Jw{c#E^4J>!}z%LOG?a2|a79XBqa9@-yHoLl=NP0>vEA&|eAL;Coe1uunbj z+_=u);8D*Q$LWnEQ8@cZVbCTqpB+g9jV%#px)Lv#|1P0?N^@CuRx~gmBqyo*I@c86 zPRX3Wj5&1cv~q)8i=(c?-gsu84_f!f$i|tzf!C8e=^r7|UW6vPe(O#+xZg4?Y4{x* zx##3CVlbpK(DTpa+nVz4lW#u+G_mRla%|Llv)#+YDu@j+yC(rs8E#rC2U);=p4_h* z9(yhYXvGpkS|gF5l(kG+V*3cb`eO9jD#M&aQ|XU3OfU44)g4@Sv2{5$DSM!~h%!EP4b$EddVcZT+3_#|0-VN`Cm$~) zc_R}&f%zl-3y%fAF%JZq{E3F^r@bzp=3qW`9)3bY`@$tKu(QKq6c}8PNLsFp!y9pJ zXf0f$<;7D1V4H~MK*}pKYHW+n8|7S$N#)^JjBb)cC9A;UMoo`eXxfPTPKTA*g!u=U(K`9d3yl_rm*OJxx7<*;d)9 zV3DzdN{?7bjNTu)Q}*!-Qygyyd|( zb2VlJdG!UG`A7Yz`qf3xZn9S~SY=Fp*|vJW^X{S0uB?g>IzRS@AXZLDd!7$grgbSw z+^8dF&o*OG?N-LXQ)ak7jTunHHY#sU2)?MAna70&_e|5lhi7{y(Y@qa=W<#Z1G1@K8WasM%WX7nBxf8eEYyOIkkrj_gN|=o?V_lKcK{B*i;I zyn_d^Inm#6{<+zHd<1etm1&d@0f9-my%Sc^0`7k-bRw(7yt78(2+a7q3 zhYJFvQ0;(y!AeIoIi75d@n${myikm;Dp!=G3u@~hc3ybx(0jFILhXc=AOBD|%^s63 z3-n~wQV@j|;oCyfACQdU;QaBx>J1K~DbWzzx|sIS*GC|k@{XEPUP{}!L>nr39=$(Q0$;uSZ5 ziT?JYI&$!>8f*lR{6p3UR9#e4!B5^bf}3WZxItPAdAi4ku?@Et!kJv9WuG-m*#8kV z$iGHmkO_s5&`CSQ?}0PVndKhKC?|2@-61zdPqmxtLJLbL?;&juhQQ8hPpUrj;`LX| zcnOXeT*aB~e;APgJRn}YZntK}MrOsd>hJ3M8F}Z=vbK$p#>}n!ZLDh^dIIs#=N^@B zQe5I;2($J*5AX{?1I+`X-|mB|jVQOjZ6;0BunjNT4p&>cpO!+OOL?XL?!&P?BZV&LlV$o7tpKTu<0<18v`o4!tf` zYf1XW3k`BT?igtc9&ppG9vKC$gpjQ1J$8ib+MBA>er)*fFK5aq^>z zaYc%a{%=Z*O!r-NAvl9}PVT5Pr5+bm#NaBuiDgXcZ<4YYiZ*0rKygzxUHg5gCN9?7 zQhXl?p}2#c8pv_5F71MCq<^ta=|U4q`4V8SkMWnj>TePPz~-N9#=Pp25x~fl(JWCG zl=oM`?%`u?KmZaS4#@p-I@CHrGYx6BAp| z)G^B(tApQ0|Nws=gt7 zWMI6&{1Y}_j@=lIgNBK%Xr7MvqLjNVn&Hj*%~kxI%SuXGIPYQuBV^(Eq$-ovqHxpl zn+fIBP66q}F9~>yvYmg9Y20ycxVRUXSOYoyRrI?_w)FYqo?hC6I_9wIwI_#HRO&=Ub?D7JY245`|?hC zuql97KcmW-KBcx`VbB)iDoZV=%HeA3!jw%WXSAsTxunq`^zUi7sO!-?{0E-kD!{%Z zSZC=lwHToZ)j#KKL-I+Awd=2Vz7>(ll}D|JW4eztXLu_Ye5n}v(!Loq5)a;W{8_sE zf`v_jWidK!lLM``UQb|bKT2DO)q~#yHQiB^CIzp(s4N`x?U}r!( zuXZg!jE{NuKf~Kex1+0$Yoh`*Gdha~If16d102kTNsLPP 
z*XE?}*bKlT>g2cPw@t$!+%dwws+Q!|u5sC`PmK!i&$_D7IiwLXq+*#(PDg6`VY_B* zXpu@z;ST6GNpU6(yvzptpyINW7IZo(>@3j#S&nCwYuZIm*UkcI`~5?s>AhttuO)(& zcAr(moU0rOv;5}>$L9gdhu<35OvqO2gxR5Wd&V*#ha)~zfMs&Vl(*M2UE|6vf8SiG zSGK~jo^EL0Li0}7&T(B=%LKi}&Qj`Ffjd22Y)IB8m$45nvY722?WUvc!7xp;_5?$5 zo?#%hAUK>h2NW;DrW(hZo!0io%xo;r6;9?ShzwKSh+Fu-DR}}daO8|=Zs2Of$q02%Ily0z`{_!fMuhrdAyp>S7| zxWYU+;R~)RAGd*ON$R58XulcJ%Pa^r4b#9?m9pZV=CtK~lT4%pu6?Pnjj z^N?xfV*v|wA7(49%V$B4v+=0|4IwG>wz2AJ=7jptrA$ck%nz+6##{j(s<`1jOb4r$4 zFXCV(i?4wNQhh6>*09CXiAtZQ6K8&?G~%LOZ?+7deGG<$T(aDI_v|?9WaVmLh${*) z%EmrNenenAh`^?-3xeux;A`^O>Sb*XEgqVxCPUhjv+sZ#XyX1kgY&KEs7fTc0fS`c z&P)LleZwyxro5{Z6sMdTE-*w*RX`X~5k5c1Yrom2c{w9xV215&3`ST_@}H-LK}~_N z4<*NO-?vEp?MDrPAFT$)?52RV2jNBEO_F`brfOM}z);Ytu;*f8Wy#KWek)UYQ@qrp zE8=Uu?>kZhAOTISFNcBSqcWE{nnSzo$AJl?v5B8xuQ2rz3KkBZGP&?%DQJtzJgwon z@4D8+3jqF9+;IS`FKSnV)@6H`LBDUl`;MytDA#HL!-c@0+^=zq?%fi&00i`uy^Z-5 zwG^hVptpg{_Ebc9e&6#1>>c=vc^Bqo?Xpdm0Bz`$n)3S9rAbvD?_UgB)~u7I_q zu;~C;T5$C13drFl@~1#i*Dgp@em|G@or;rlG!IDClLP_D!+%NlFX_H1ApA`(|K&Ph zlji>~fvseg_BvEV{jKluA4iJ+VY;3y{9ueK+eDVC}IH?OEe85 zY6k;Li)Hz5fiHg6{=Y}~{aTTT_qRKO2HOL|UvPFIx~K#sokvAHQ7Hre7xf6}%jv8N zWno1rkmltY^Cvj6m2>UG?`(+7q+MgxvZ7u1D&a*ka`N7$-+3p04a6)J$y^8=cxMiR znxKtYS|UUa{8|p#WHOmiD!2%eP~MG93YqXN`M5PU9JOuqZyb&AYzAvJIr8NggRM;* z^~Iw`Z(=-&KxeSTRW8taE4TJVny-gMEnfrzz0LjX5YKlW(p@hNOrZ*vWEBJCKECwN zu2bOZE8lwGiu6E=+lrCm*>6D+3c18x$)oG}%aBLfd4H%@gZfr|4n5=?Z<8)5ft2Jq zyP`V&E{wxduC7yYZ&807kMhAA?`+|Mx1E3Yttuxf4vI@h z5P5R7dP@$`U6C%51u83F7ddRjEYt_-X^aN^laJoYz!I3Ev0QtN%-?OMfOXv;rbEaL zBNADt{{{FzC~6W4H!eM21&aN;#U%-D`%0o(_W(-r^!HlNTTZreVuzKPl=@4Q8B)C;5+`(sACe-XzKqK{P0mI zwln10DE7aikSyL#Udkh*rt6`ZQ;(R*U)F3d?Mqil2phhPx0GuvtRHwv z9D2VCv*p#recgY2Oy zpEll&wq1>Yntnj- zfwHgEJ*W0d)=(nVePsky)(o&%0$9{<5EUG5+kaQFN@HQ(F}1cj&^aYQGwA7jy>KNo z3H*6Et3-P1KV_U0_*D`2?KcvpiXYmz^F!{|i;elJq%8Yczq;~3_pcg8PYz=$evKpL zzNB6*NlhjEgKvD)qJ0(~i$SF3=ccg(USDtY*Q8{LFqIXu&;O&%Fr!U4iL|b~#S~z-XTrszE>nFJu*m<|Rl22zQ*E9U~?#*!^GPjNU!|GNj 
zBuxhC0li`X+=AdY{yV-!SS|<^s&W6&!B%O#Qp-;UMc=}FZ(X9hW7zPAU7n~jwfK=% zRe0r&^G5w8G&nT!eR22I&8EuoBdwZ6P`JtH@)(I@O0n-By&PDj2=Ln~dDhzM{T4bC zO|{R?oS6gBr}uy2b<2%;c}KFV9%RK4gS4A`5A;n?&g2^-7VW7^*MpH7NQH-hMkb~= z!=YbkdC@A+%jR&S*uXfzK@+^QAK_8mD0 z-9f68T8frZ6qpJ#6gVUlQ@!-=bg?nVgOOO0OA+jHf3{hqoYFGcSt6>sak?v5N~83y zh*8kcCX1NF4F$BcO;wz#^#ZN3WU%yJ?ghC06+TZ`<0KaP!9J!)&Em3Y$19lia zcE$}3=iHsDU#s9cBrLQY4<&b>0df4vUACyu+t=Y5pH+7Bx zy+oaL=-De+{Yh4<25z2teWi-LqB9@8hO6sOy}YvV;DMLerD#Uj`#TPKwO-d3socrk z>CIAAG~_=o88jIpE>AGc&#uzpMA+5%%J!Q62!MY|OpqU|?|xZUHcikCQFA{1>aqeV z+af41YY(HM(mg6qK~!Zu!pu0mR>zixhqRJCcQIoy=J{J_+51Kg?$EhF;}Db;%qNRj z2CRh51}fvBRi0kNU^n9+7`hJh)|B+|@Ic^=7(iRzK+h~WriK)zZp?d_dB>!>+&Err zxa-r`F>lJx-ro=vFR-SO@J%h$CG`xg8dw?)Q+LaV%odE?+PG51Yi){(3n$pW2N(TX z4vcm0W?h)lOd!OXnkP_hKW$^Gl;eTx9maZJDEMy|Y&CIYyQccAAjySQ@TB7!%0Uff zvB$mbRX;f0)FrXr{oVoH{g<=LgPq(QS>RMk5UZ@t1@u+8`kAoj?ha`g<0`3Yme&0ZsIiaZc&c5 z{tTV?TLaIx={~yi4f%i{5DrY*V9f%vfxUuzbUtL)x_`)ic9!2}YiF*G>fhJZ60Wrv z?r8$CkxFU-<0!sbQDK$tN=HWr+|2=0kjC5!%W-W$Hs|Q6J!gI`o$n*$j892T~53kF9WjaFPZYev=pJSYH>$a>AN@{ox8|nC1}7af~Atm zDhb%4w}E&m1x{!oRXG2^|C`%c9^a?ygWp3m8M*6FP-eD%?!`FVxY6afn&R5zvasWG zXP^p(&l<~&k7}KxT~HT(h!9mODx7$R(xD|b)w`ST<+poxqL{gzv3s!*oAYV~&+k!< zpI3mzoS6AvIg$38)w%k*8~gWO<}0|e>bSf#DW95|!s}7xru|UwPkQ`A@5^;hEnKW%=pN>{0d_gL+#&0<=ZNng zXJ+KL6oInGfIx95_I5|}WcjM)`4c%_H)57)?a#A&R6r^A7rNbceF{;U6r>+lmRr1aJ%bNPQ=e@$Q(eDOvRcm-L(M3!tL-Q!wMX1QE&V_jQGI;_B>^oTnWjYv*P)v0Nz%8^!bh zn~vC=wSmRqL?ohdKiqy#!DfMysbIa9QyDQ}=I*YHsa-8bTo1-f2#1U>xCuPMwM!=# zTl`RnAIKjxcHlpq8$-NBhI@WY=sX|_c^J7kC2VbH;T$bE3(FQVir+zEcq_>mAyW{9 zoh)xA99cIJ*sg0#5fMC!nF!X_&g4R4KUBWmX1X_hFo@?m-U7T946V9igLpg5d5$4A z;Y$YpxQ$NF6gdQTDy_@70z>5jMuy)^)O{@B_q%1a}!@8HZqmlb0P;@>rkv~lq zMrii$L44dT)F(HLS&r_upRf!pZwb7y_pUqtaag1W$Z`4l(vz&zmpel~27&uI@pWS_ zFGMab_U|n@y_O|!VgQC$l-og3^}4rkY&n_3!IZqN7>W%Ik&y_XV>X)74BKw{*7@ zaUlxUT`~RPo*Iz19;fT~<~q%DXBf;X!?;D)5kaOq90Ggsj3Xmqq^d!OgL}$lg_rh3 zHljr~*B1uzMiY|Jq-v6OiRLW*NaE{B&VYl;Z0nAqn~}C&6@e&3x&60chaVh4)a 
z8LT-+aHPsqcrUknjKpZ7c#VWfmj2Vv{?_U+Z_9I4BQIY`(6{cLDLxYU3Vr85qk5|2 zK8Zz}XF_!#PlC4+YAK$Y3zfTKiLJV=vg<}yhN*z}eugxG;adRciSWHv>#j)^Xvz*Y4)(RVu_n-$G_! z#cXD}X}fbaTQ0_{{M^c}BL^MJX#|>sl#d4o!sK;N+rR?upPXH*Ro3`RO&3P?%HBX` z#KpS?*B7Mct!{UZUU*Uo`fQ7$jJFruz98C~?|QEc$vIg%5TeEIQUmxpoqljcH%8 z?U{Wy+5MfCGP6_Y8SeQka?`SX!Kh#je;COPv2iSvPg`;>?HBGt@j#$4KZa_aUu$81 z{aUa29T~}?5JG0}dZn&5-#Hh*VNNji_1kb<==pF%d&iAd_pml-749x!&3ic%<6QcI?^vL?cDCP{L( zNP3{>&W5f-RK$|n!E_}f%6&{mO2ZhVcn4`+iHE_0xM}kSAHyPL7&Mq#15R->=#83L z^I9rtM)PLm%~N-nCkKj$DE-W5Lv+7U1CFndb~Ul;x^~H0-a|&^v$Kyf;V0dhpDdVy9?WKlNe#Sh{?v{J`E#E6P!UyrLP$Ikl_#c9r)BoabC0IPsIq zeZBpG*Sfp|vsXB5&b;dGd|eO%*3n&bz|UPwr>S~Pa1F}GW`Z;6 zV?%1OGM4339OCT-?otNAXj0ZR;dFWAqllx5o7hIIsx!CC@%2XyuISy56zqw;IOjsi zDBKKt;o4i@Q|@cx8G+pFNo3vO_;x1uM<>Tn0&kci?TC=@9;-7@FmEk!YoO+8Wtpu* z)D%8%mCVjS)Fdy#_58Yd5SH0xrVUjuz5T6Bf7RszD61kSF;PzZ45Ig;mjSVl^{oalS z&?5Zv-t&ZDf{uc-`*uZeK=IhF^R?8A?t0Sxl3y|z1uA_*WuGH!v9ENUVmv3Tpa`G> zR+j@3Xh20AHB`aWNAG8QMgLKMb>?ZpZr!_1@$#-aQ)pzy=?d3<_1DVM<+s{9(Jon! z;}g~&9ntm;a`W6-GD357Z%RM=1`w070PB*l_vy~B(VX;$*C(g9fp>Fj@T8sA^-tZ` z#UQSewqsKWz`LLmXUFGQf%Oy%zW&OgpqiXoUI>uZq7iBtIc+W;?AbMM$@Y^&-f{R3 z<(D?aYI^R5dsZc9&s4ZQAM)<%r+5)!y4>e_8i+jjQ5df%^-eyKZt^}A1r=1pCrlP) z?2t-I4a2a#j_0eb4B84kcs)Z76B28Q3qh!mJtR%`rPRJSo1e8o_sU6KBt60(&0A&) zUy&4RJ_fIs%G-pL(|{FJ5p~9P6QuuA*N0@!1$V1RDNOaZW={A}D74+OM3x$BareGY z**+K>HDHBe)3_2qFJJKhYo;1c+qcO;Pyg@P`tJbRf+0mkOsQJ+#QN%o@InBc~h5DzLTD;sk~b`p&JHH zAOO8{vz<>X~MUtRDViaUO+7r43AP z%*#@QiQ?&=umtSvQ7l4mXutmK;_jW#J3<;wMX2x(^-OX75Kub$>-#9)8{8HyP*=T| z`0TpXjnpZf;*73#U)|%%@~3p)E69W42=qeEJL1f0o zW^F7&fw(heT~M4^*9`%t?qYeU4@vAFP&39(P}J+6^2hxJ57|lb$BXQ^x{4-4a@K(r z`ZWp(hgqpK@3QkMUUH3yGTN%P%#_GR!_{l#Q+AdL%7~qrg-@NF)+~eZf&*(-3_Nom zF_kkLZWFA69Mt7xN_hdeyLc=CC9W7&8&;SG^pWm!H( za33nm5s@=UWgp$sGE?YS-)+!?23O#QJ4kyP${r~(Y#KULB=e1-I~rz8Cyg20;WHWbSjCphG#!7Wt-11Ru|H{cvrG^(?0 z=m*f*M{Zs7VhzOad|GB}as{5Iu5)gN&bPYP!YUkN;^=!xhD5`okse8ND@yT<2j>Yk z%=-(@8P0Hxs8*$^r_`)lda%L#xGG9f)>C-*~h4a{8qvzZh7I)NcXXkfZ8Ii@jq! 
z&P2%@`goQ7t&w@SX)_IIfZ}dv*2OjJ>0olbhMH~BeL@@!PXKej6;E4p`M%}0iO zS);Sr8;U6?3l>XFi6znH^sN^eu2za!ZGC#<$fHYvfP1BruBWdYBcU7jk&@UKi$2TU zg&aJXPgF004dF{B$^NEDr*(xXF%m_1K%GW!9Czu4aog+R#g9( zoEB*fH;U~M9!c{sMP{58LWK>)3-Om0?90u*TgZwWlLXBT=^;nLy3w8Pj&Z5y4||zZ zMZo&OjnlpW*m6`f-mlKWO!%Ns&p`xLSY$1@iCRHWLmfJpAGIzRMaLz6jAhIg&+Kp) z(qcTx3lU=u9}{8^*qQI0r0mOA#;-MP+(x47j#qTPB0YQN{*lsMtXm53Xi=7WC$eg% z^=0XL?D#dYDAT+eyW#Z>Bb;}Xuu6J#Y|Qy0viSB2k)To%Q!oeiFsAE6%jWvJkn|HO zpeYSn{J5=c!TmoaZoE;%N-4x2JWJ>GPD)ZH8KyP%hr9>CX*9`SJ@&auIIx1~XqUlI$Sxl*-8 zx=$THTj)C*0s>8>0=~3*lsa;B!@)n^q2#lt+K*bVsK`inFL~F|vU3Lv?X6zIzObQ@ z9aOH2Qcj3x;Dk+R-g22`Hi%fZwbaEH-HC9M`?F=}5^0!g6Qu@G=1E!<3V4wVV0784v$!1KPFwG|V+DC)qsKkzA$B&E>zPI#4x z8tjR_F2vF$K~WAqKoRoD#=JK6PT723>kK=M9VCCLqvEqBzC5 z_wu}EDs#pG%tzr90>PqC(d>zG)fQbAOHpl*u3rP6FG^lU*zkIk3cq!ark>-6k91a4-7XG|^3Seav zFk6dHUvXgT<^+l)sPEOUk7f&qlin^|)7cpN>l^RGT{ZC1pz65dpC+)y%s_unju^LE zuHL#8XdDB@KKvy(n|6Gq2u92VKdwXrY@yV^FqwJP9t`707tkXaY(<~do6{%&ZSsDJ zz_KM;Y+n$lPZvn>eN&(PHd+VR?Kgl^fJU}oVYLqU7%(XG^Qx@5R20DJ;x_G{*42Pt z5m$DAL7jhr`BUe7Q3?N2=EueMe|hF-Z3zET=3mNu*BA4DuYoZ`A|x4tM-IWNSfkQJ zdhu2617=tw=iSI4b(-ujBvFkQeB?QJ4e$34CP0vqr+pY<%x^lV0J~)-uFyROY+&RH z)YMwk1R4o!A;FYO7ySX81Qp_lky~X!wnc1J;#^TeS!aQ2Z3Ix^-F#Mo`C{iM zKbaxoAAMMhZqSN~a`hBci=}3d6bVyOlp<%QYg1r}qUCiiP*!() zzoD1+O}+CwATo$HqMayCgDX&kZwqHvP$Rj*xPi0K*cD7P6;$$MFCjGIuYRlV2t**3 z3K+1CbpY5M5hzh*=ztHy^lbT1S91fuC`l4h?wQQ~ZV$QRIBH6WLOdM)r3Q1z3J z0ah;@g}HUCVg=BGmNG05e!hvvIN&D6?8Kd0Y3brS5Pt<2Fu{-_=mKMl=rO0_h2-72Z30F8VHxVUTF zfpxv!Hjf4>yVAssN;~z}e|JkwJhucjy*r(k(ql6)1L@4L_R#MALAaz(=c|Oz++`rb z``*i@zo|&x_(K8h&_&xedPiT8+pVgW3*|40j^2>dJ2B;b}fj&DJ4# z+UJCu`a2p!SVPI!OX1F?dHd?YI8cP*2}&86^9RVpSqL&NwWxf zSz%7T-K%!B9N`>_L&4Lbh(p)n0|JoTG;*d)Bc@>^I+t^XxVwj%f)5#zL*fuEp~Xa@ zA0F#LV$@anD31l_2e^*AM+|vCZ97%Ex)d+cZl;Yv)!+3~m*>qkAs|n@vh4&74OWfJ z+ZcTE(N2;rqgL3V_U;*9ow9gz3MtAfUm6h8IfKY|I{W4tHy6G7WDpwkoo^PzJ8Vn&Yt$!OdIqm5KOZqJ+Xln~HCH`0@9v;XWXE;UM zA>JB2ayerdgWjYpD}W(3tCNCEH&iBLlJXjz+_EVlN&`jBB2FjpNxFW3O#?;4py&jmlAA%ZM@MBJj;NxKBN?SHD(UPER8fXOg` 
z30>~2bw?P|{=^_HDPB$2CmXKMqQQ0Z6XgB$kH*V;&Y^5e2Jbm_LrODv_eFFB$EfrX zYQVt$T++~BXfR#v_FNp#DD*5LliSMMnR2XRd%1>g91a=QHTN1zct{_lK21N{#(G89 z#5Sbg(#}xiY8U1XtS!Y2tnR8O%}-$$edGu~2nQJxJ0E-!0qpe6Fwx9OW2h)|IWs^J z+;Es>l8!f&Z++mCAP>Y9e}*4nOagJX2FzJlo_eE2q^jP~8Gqr15yd+;Y6EUn@Ikt5%_W0<0J5lPMZ9GcmFM zScfbp`)fl6`Ol}fLAK!%kGUndC!`*W@v{*kv?^5+G7U6LPKA5;!99^F6{Tw_ar~u2 zz<;B=XU5MF+3y)N#)1@9Ab`|?=Su?$00z_S7x++vY$3haDtiJE5ve3r`afZ>U6A>X z*jq96dHVWR8(0}{aJBnvBWvd;z_bMi+#xajgw$KFnO+L&ZqEquhj8=Ve3T)?$%^1g zd!%ty&Mrt@f8Z;oSqGtvIaGVXq%gRvG#!A9XoHdtinbey&&T=IhAyKEl2aeEfx^oj zv>?dM`pIBOEqxm#I1qT*&XWnoF(yhrUxexOb4op4p6O6(E(ZJKFTm!u(EeEdStl)A z6x_0*DF(fQ$ab-kEsm=TT|ih5r+B~jfz7DOx3A$8k?xWbnNrTglWU-O495Em~62s6` zw@*`$DaTBqi^0fY!Tl9}ZknB120rPC_ycl8N-xO(nL$!#rZ$f4!By2#IwGZPGaF&<{=;87huh;)?;dT}U!K9!!p=xPV9z~icd4d@yp6>m z@LWu!anFB>Sv)7YSI#mf6l+O<^kQC;st*0!^4nNpRA+~=$f@vj=YbcZkrx;@uKQN& z^jB!uuAp(*rZf7Lnev)8bf7mQ^*kD;E>G8%$hl-Yf>}WyJjs~v){WGh9S-el@-u}7 z;E;w#AvpGyQ<^mz_E~j!7@9X88c!bc_CXT3iM$a}iApSd z(bX3}vzaKUr<(ZWbdm>db7{3?hAxAf~%9)+TEamyyCnm;NmEeMRQc~U>w!Jq30sVYN`T1R$M5O1M-QOD4Nmo5C7|TE2k53i8heUR?)#G7 zf@kQ}o$yRo7QMFqTB@=foo?+HwPVVk_q@n$%QYP5q;jo zKso$V=V7|@U)VOK?XOLK5e|60{t*P!gg*70Txkw! 
z@~x@iQ91x8$rC0%%Yp#NDAN)M{iZ(++1S~O^ND|E`YPGN$t-~;8C743{R}1O*q0OQ zxK*QS{+7gPH_`M7Wf1Ui{m*4Qz!z}W19&(8euztr8p$tWnj)%97?e2n=ob!u<)eiq zJ50Fwvgv9A!Y=4cf5ISW-6#+%TnZD_!C37OmfMsigZ%{b`0y`q=^dKig{zwFB95u8 z&J}0ImvN9HJqrSAgiDxIL&gYCBRp4vI(`+`bfq7wXk(}#URZ6l#}EDmL~8(oriL^X zsY6YUE(MM6{V!0gLGd)nWob8BMeIn>D{+jZ-is2pR!s#;C4nOan2b%5Aa3@rNE!i< z6vYG^i~P6Ri^yz=bqs(SiD4p)C;uH~CsL>=$SA-FM=TwZfj-w}gWift@o>}++^tgM znhW_%5fwG%Z}II(c!zuZF^H+a0Ez0-u411s0d*Jr>W0RE08uavLGk>}_kIHqcz{c; z9I%71CHg#zW4}b0|MXm$eh!Zxu|kO-;6Mcp#*q?ybwqP2)i z;%ns=n}&(YKI)$XJ%tJws}gSjR|dJB{w_|9-qFhkqeez6s1xKu&$$=S7n}Pq#&nAA zeK4GY!Sb8tfyO;PE5!)NJ8?MaVJyxk$Qtt;mBJX~4J zbL9rX)*&TJ4??+@zo$}J?WaO$CjVh>E$lQXqlGz27B;2XBQX*nhF(?48nGp*aETsM zMNP&PNM{PF>jg_dwz~b5L{V?TO!Hc9DTQ~HSL#B**C8aT^&_Z?-59`s{@C@WYKeBW zeuM0kHyIl*W{+?YFWV1-!f|J_2sdbT5`El>=g?)Qe zm|BNF-Zm;|<_WyXVXzq9yeNI+9z|CY?=kGJK=RD6c9oaz;N7d#Fis}5V&2=k;`gc!>r-qJRibMB#HOPX|CC z%L=PK=FSqJ6rkwB1Tqu%<5s|1Ptql@{_sRfEjw%Wq@@Z6GQ1!{rIflxS`CT%ebMszifF8gJ1)-|+ zu$CC$vLotvuHA|tcQ5ikK%bx>Sk11-HsK?t1fc^6VdVml`+ZKBI3JQ`|9IBdV-*17 za#F?j0H(ZSSH1mSKzBq|POyN30^n5AxKKDr8j!oBk&#?=%&0a3bQ+jU`~ryX`22JEmHmev0oddynU2eV&WtHg zz73G~KfZefuee}Kb=WoTUz}rnZR_}aM_+u{p#p%s8%Y>|TU;aRiofM|xwCm67?&j$ zZ&B$t20a5N0mZD~DVu%6P@=6<+p`_e*}AV;1681`4$r_x-zvEIYtPJt9|uX^xEdx4 z^81Wz1?A$#0ej#Iue%fjh~5a|15A@w?BeeGjYzMrxw59pb5%8HV12Ko*;=4L1(>aI z&brqC<30Kp%^-_^Z}VTK6(>>umCgTM1&P7}cmtvCf_@TemKFjSs?XZ9Qe}_W&J?>4 z9<&1{GjV`~kPs^w*Kdv3!kujsoIgMdR>`u2ltP(}csN7W-I%)P(Ug zmE(;kgtwx?rcS^?8$X+}P`va3K(_og?|Pfw07IMWa+A%b!IXA&0-@Kn|$@ z+lmnGaf632gt-&OAaSn+ka+K5m?s88#9F_=6)@xb^pSum1gKj0t!t;WqvlG(XI)Mo zW(mQ>{>z~<5UqPNAqcI#SbLz!Z!%n~mN3Kf4EMOyOf=>Ej)iweB0N&BZ_UQ9V2cC_ zUg36^*Hqr0ZkP2l6$(}=Uf3hzQsRBmitxARvZvkG^#}X`+V#bt{V!MY>VW*&vKyZ* zTC585&|);!QJ>54)Vg-MoZo&dyM*8_jMyY3F4EOHS9T=9$GG%vw9Q91&%=JTL{_$i zwU7RR-yuwysFMHBRPdBSyGJ@o?5Vrq6sV5O-dh(l^EB^R_TVJ-Wb+f@CF@O(hdD>| z(0(*Z@xw)`MXTgxwv5LN6_n*TqG6-9Z{2Y zck;|ZNKsaN%=#kGy?HhtyV7TTR)WTWt`@OxOu2YkcAyE2e9?u^oU6V_nGzX^JT+%0 
zCS8cr&jsRD356pfa>R#*=Qg$<)iW>JlE%fZ>XW8&j^|VczRHvPY5N1m`dK;!@oDP- zHvim*!3a&S;X5tN_LY=C*}oi&j$XwnL*gLw-rS&H9`;j1@~(Y@CovmUL~C44N&~x$ zXE@ZzN@g(k*>Foli{0DGy_Bl)9q?Yy>Fd7V1}SRq0Pm}zQ(|$H07%|Xg&ft{Gt;`h zsQSZ(hICxCb^gq9;(~|Hj`dv)s1$XS@e!ao?U>u2&dYOd*+Ho#3VPkx4z#Yixfk#D zP|u0UuDXm49vt6>jXe8Ayg&kC)pFJ}?oc$lD+} z_>-Wldb-N@4luC6W!j0AJ>RI}D7teOnz>dR?00>w)(#d8ktDlD|1P}pt2LeF!-_Sy zyp$&S%2BN;}9YI)bliwxB6?f)doxjR^D{5Dc~2gJV+l?lPv))PP*|_5QEO&a?5n!ao(&Njwx`v~H>^YQvl**&U=_aFkd&{MYm1U)^D2ue$7fHLQyx6SrTFnwGGt5>lLbmreP%e$YtI5C)IMsfPCEN zQZW$7e?j}Yfq>U3aQ$bQ2)#2+=Q?#4-fC~m8>>ESlsh4Pgvyh|=v($HXLeKAu9Wn{ zi<6-9vYJ^fKsy497CA_wtp?isH8c}~q(x&>Z#ILfKg9F~{T@5uU`_k3G%rY9QZR8- z$2>^l(h{@((`g--u#u!BiF@Z-g&9bMf;f@PZz?^tpZ3$J{J{yX)_)vnC?;m}e z){{#2u)`tUHvLM;NZ%gxe2fPjit|=w0=7VO+ne-cQpmn1WM{x$NSxDdJt^xS&-YnRmzk?nw@-=oFD( z5K1M7^QOqwHro7nUu3)g-Zn}=5Oma}@%nF+jPhc8vz!JeN@rCD{i^Jt$XPb#Z&icM zR*Ii~6@#(0JAe^zX0sv7H^a@x zlKi0P77M;CyL3ZI%*|elIFpDrgVOk`<0$gl!=7>MO!0CnZb|2!p zy|Q%r_1aWI+Eg@mT`b*74#0hhM4W=xaP)@9^>R!>4I_GTVtLR*vo}|J|2^W3vQ*D=qY`zZ#H<1>Pm>v zbb1o<4YKlyYZY779R21C2osr~Nd#Z^S4!Qp?HJCgRo=Z!|b%G6Rr)V}eml{L3TRZLX$ zWA5NUUk{=0TX2szZ5Qyj? 
zS`VYqcJdi5KPz=?fOUYVloYO@zZ}hodG}=(`A52aHdt_989&jkk}z#nk;PgF=Pi%D zrlqWCQ8$5spSjQN_{yacvzrtO~Zb6|Xhl%`N@snkD2T6(l&G`Y?~% z9Kwl(VI$5zC%;Mh*)T9daIDogy~VH7bls#M%LIRyt;OY{%wel5&YIZwQlT_r^rNnccd5^hgdo zHTWJbZtH{{hjpybi#2r}tqf}ZE@t?~dJAwS>o*3fDbl5)pM=FBJQ)EWUDE^tE{-dJ zXR+>_Qg%_@O<>cM(R!$Db!PuGLQ_2WUNaMaTHs6o9symH;GghY}OtL%%DioN(bRM#MZD#y|KWQ>z zxb2o|u#}g#b5TFMz3+rU|25~)W}U!LL(13F$W#B=My~ZJi>T`F0K>pM( zzI=ZEMy=m1HdudWrnSt+HLmw4b^1QOPRgSZq$7PKS>cV7*)a`p|N9SP&TJGPWBTca zhsL`+bB{JjSs&c>1P2m3^nqf1{P-y8e9Yj>lF0XOfa!L>(E2li5}IzDfRG zdTJDZ`zpj9Gz;7etL*(WYH?Zd8d!~`!Em;?)=^Nmf zNiq6neSz$)ESPt_a+ncYU~evr_^;H(YY4rj$T_xQj=p5~Az6ZD^qMQq{B>7GCF@G^ z3POKEZG4%iF4~~=^MsOMUdYuc{$97=u)`u;2Nzq%7xb-!7*77YsHjW)woo5}TxvE} znT1iW!CkJeEu^Q84h%T?f@=(LI>aXS!&sE+#kK-wVI99orWoSa429u-9S*Al4W+P8 zm(9lQRqA(aU|Pu53(%tlJL&8~J8@co;DX%u4ok!0_ziNHWtQ}`gA+iU@?4%~K`L>hr(F62LJ0MzXPv@_Y0Q00Z5ID{T18bzS9pBuo(XSw z^UD{cN`Zthfyl25i@lk9ctC>lJDrbZGggMWDmk&&tJ(HxS#7%nwGW`i1?;JOEccze zNmSvwlsZf=F`uowlR#w29m%mIRI5X;EE9iYJ@?dEytUQj75e%yC(b0>l1pq0p^OgD zHk;FI%L~|la5JwR+p!R89vu2TBKg844b8dEvVo^iBeRSqpgg*idCETz=FoR*Jk^3| zx6Mkk<{>TedGfC7wGal``r6eCSL|PyAkVK8AD~Y>-J@(dcZ~4P)Zy1bWE%2{aJ=G^ z;0^4G@X3A?-OS#X$ZMSoq0apNEA$r>%zK{A>VB1{4lw%=%b8jO0G?+zz@qkOw9#k4 z@|ik6cCRMa)IiS+W-f~kjo(~z73#B+oY6zUV-E&7t~-~NZUnavc(dHnFe!tv!9c-^ zyMuf*71!1f>SF(WTG-riyCR`*2MXSm?yeCwY}--M7N)p9#kfXKp??~8AwQ!uO*-S> zF(RK>hE;Bb^lh1SEIrBmb$m6!ySioCLA@oBc$$P!IRvy$k;pe`6iU`>k0brZ zMBd&UhMX#Yfe(a~NBY{4X9K?q(ick0=cv~Am(fFnx>Mn}ymNCS2^%d!ZrLWU&kY!u< zdNnNku(zSPJ}t>=8W)WmnaXH?_5;DyalD)caTxD_T4&TC#|sPvAFuRJsD|0tY^`N~<-<3vkiVaq$Du#r?snODERl*`S=t$$McBHbuv0 zfi)X5YQ0z5Ng`9Rz%0{FYg5C!&TNta&cY|PbLzD{sz8TXc!Rk?*U>bcd`(ar1OW#e z&c@tBjOJ?&gvEzp&4LzN3|Duz9s^k|A;M*EECTu28Nb0;CWsp<281#>P zitm|dbOE_T0=!0$wUzo_-Xu%%)K90C*kf_1!=u;BY8U zRq%aq6)&||Ik$JOvhy`sCwKP$IpxP1>mp$=e9tHM^p~>QE-|j+;PApNmIVAtuV&XB z&`rc#oyvkr54WVGX7`BumoSy2xx19+sqg%OaXa7fI&+YFrtkN=yKf{$@lp@M_drRW zBlOP5CkA1{qNRaA8o*PMKctKo0#lL5DN0GVw*A-TJcD-haiQA8$z)#MLe4L^o%AZ@%UeYCtvP?hC+Z#w~_{nX8k* 
zQ0>j2hghi3ok6zY+AZr^{dZPzzuTkO61kT(Sg}V}*(At^QrU%dkwQg`NfB{?I1)&@ zy}q#0eY_n>TD!!`D!?w#>_Y(0IAiA z^`?#v#Pzb%PPOLQKmt#bvN(H&ck8PD5Sw>Xe<$sXVdr}< zA>&f!_b(ga%p~#Cu_Aob!PP7^(zJ1T{{&jjYK>>ued9xlfLx-5r@M1j=Bk}8{+SLj z_SbIAVeM?jVe7B>y9x5Fo-8!1Fs;#HmKu4xjy1zlS}wyDjKDe=wN8F>U6l9t)m-OF zZcx0}t=s7bb!m+e$vby4>MZEGc*EF26N9**0slK_Vcp|eQ|*q4+?Gu6ejCZK&_;}S zFMm@ln!X(}8vja@#3^UTG@ics_R=8uRihcV<-Lz!YcRWeA6)6(=Qz;~QAZ6jNol3} z@pOLvD^iyy8DA{>&0WXLkIQ@xXBqz1L}JD{5clA_LQSeq(Y;;q&7z1^#{g?i7Hga8 zi>=z8^T|~yR&2A1TZf1wV25IY z^`AVDjk1^0Y@(IIxETblN(<)3G(2SeqwnDBcACk=ZHg9-H<&JaD+KDo4yV03y4c^EZ_vgKT>PUbt<~$Hcp%YIZAVL!9;z>QAa&Ki{pAm1@~Q+Q zMRn+hJbnIjoft~)flejWU|;$8m$7MYkjG0GI_hNk_1H-JAjzpSqhXk_4F=xkWLzDS zZNl_nfyr-k2IZJ}Ou|C3f%d21Sb5#sLZ7+J4rlA8 zC(Uk&Uh?x_sHh0{!G_nluK`>8MqCAec>qJ}X@`zbI@?Om9!%xizM`arzH*P#zBiDk zs6(3dU18heF58COohVmVU+co9T}Z73-jrGe?QLOHtU4##M^FtV&G$e zqj03yaT;zYgoWw4`lON(;4QVWjT^R@@(H0b; zVxi*=``gB76HesfsCS)lum{=A#B!U<^}EfFK*_D#y<}JNw$aR%EQlEg$gmKD zn{<{>9QGcS>CwGWq4a)kXFN36g^X0oZYj&_8CM7pB!!cgYiw^e6y26hoHV9Fzo>P% zJ`)$L(3Q6jN580OBkizBI?Bo^V_ZaITu8-_ClIR%$-CuUmK(JLv!r>F<;}s)qa(HyF&Lijf!;|mJ$gf6olOiF)74&cqTDrB z(&-@WL~K9f%JrngVTBIN_P#Qt!+mIw6E}ia4_Kpdfh_$YtJlVq-B|mT7I%Ijz`8N% zGl?6#7q{R={q3e`%K@FMS;mbGVeOvtH2a=1(DKo|g0Kv;`2*2AO+?KXF5+SZ0T2Dz zlfX2by%T6^iq(D*75S!Z)1p`HKrhQJ4O!9Tcq%QiVw$&H)QU$G#PirutcG951+e2A z#KGV78?qe27hvKiw99X3IZ8+Ib~i60*k2OgVPh9cGyj!0B9H-tDJfmJ^f6CgrH?>sXoDFGqvc(n-sx%%+tuIhBKP9_R}l)&0kxnKCj{? 
zos{IoJkkJW8^`Zr%nG?rTsE}&5HL2wY1bj$E3#`TKRoWrc2P58{dQ5Jw_3nuc$#=m z_36aP8DJn&{sf{pTrLZa4PT}PhLC}&1>;dHvUfM1klG0wEs&wQVw6S7Pea`#sW^-< zs_m$t7*aB_-cVz2J|v7`m@pAT8_5<$3GLszFY~DW{ZVK5L2LTqb3XddP>TlT{`by1 z8#AAJ;l4k)~AF+)&lLBM8P+koz-fs!A$`nJ$0B`V9&SGQ_8j~AP+);^vE zRxA%W;5N|~ z9M^|AROH7>$6TM@w6s8i=b7QVv_>IV*RsFNO)yiiFIOcAj9s&?7dwuw!1(MR9X#27 z;P`;=hoYUx@hI~n>g#XcdaU>RHVPz9O;ZnnZk_o2EEmqd9G0jC4Or1fRyaD!Fw$3; zgFu-yO91)J5)tUy>jka+F>O9R>EVu&@uH`&UO?QH#Y*nnNb=szvm~5o3%%cJA zj%K^IowWN}Q;gH$Jp+h~5W#&rAqja7I>ZJ5-{QD}7}4Pd%PeXT8i5ao~2M#=+35Kh0 zY1I5&hv_epa_71g(GH#Ih>+F<*ysY1{6m%eGhNNQ-*J6Oejyt4;&Bl8_Jp$nkLB1q^SEKW9-pK1>OTPmx41F+*B{^{?11Z=*OFwGSv_?3Q```zn64gfwZ~eo%1RW9rI0 zO{6FrLN$|IbRIpQTvDPzsyAU7lhQ8#TkFWIBtvnff#g}{F|{69l`T=H`;?teHbD+% zC!5p(gKq`uOj%i5xGsQ%UnsNR@Fas`mA7t1#3|OJ_+(BqGF0g3cNG7-M0Y;mR9vTI_swxN=el#NIkD)m)N&%Judj% zcniSQNyd)=8S@)Z0KEgKnI{R_UmM!K0My2WIW6#s7+^46$!V-Ym9K%+JKVo2)M~yF{hw)0RRpw~n~j4K zbnRaw{49MHU?<_uCr>S1Corzany+uPcj(~HFbAac?nD(-vUIh}7zf-!JG7<4@SyL# zj;-C73H#7S&GVLvO?l>N02uvWVu8AV*ysQj9cXCl9SleO*I<`$=1=%G>)3zc-o6LM z1D*L71hR3(wrztG(<;AI1BBLp+N!^hXv0nZ+WPq68}`{MYpuT~^o!q;aCO!GnpJO~ z^`K@J2{3LkE%C3({SPEw^7e1Ht*<=ZEV250qhhW;=rKUJqr{khr67FH4KFyBAXv;l z2MlleON;wkcI?v%xBqfa`&-RGyYBy>np0=^Wzp=Gndtif4itV5dN(uK(>8!1z9`$z+G7rE$!WfB9{}!oOQARsaKXNM0H0SM z&=x*JA^&Fe^`AXIrl2+`;~Hk@W&3%jLDyzq2ISuYbOxNi0W<_;i7{)mJOEaDZPQ=4 z<_D)$#D=jMMw=@y7<4QS97%|g(;;XL*SL;x1+3}({NEcF{^^;4-YkGu`mJTpJhQ&! 
z28MyO8ZTywSI}B3XWBCi#a19P2Y+@6@*(9iSaACfcU#i&9C`a&i2rN_^NnU0|9`Lo zpj||Dr|74T;1^tma-d8b$t}Y}6eF~`*6W)=$jp@Vk+g-$npSeB%jVs)x1Jmha8^6% zGnda^ES#MI0(6zlqS8O#do=#Lyj2(VVsmhN>P$1{7aU>6MqUTo9qRJPHov0&);f7g=68E88N+pW zkdo-^_)aXSOQG3aR)5=$9-HPoqdL^;rJsu|qn?-AS(dk8B2H&Y6cwDrm&g0H6E+r$ z6JK$Wnp{n_Oiq3Lw)m$l2b!4LYMRz4d;amGCLYpOB8GaD(9fQjqw`evBoTeM1oX;_ zBP1pZlM|icM~OdykFnRY_$i6=bsY=EkIb};YOnr5X=L{l3TjCS$XNA~~clMk1KFH=QB&3yYMPqlLJ zPK7a1vPMK(B<*87RE2WlcBktkGM>j0NfQYHlSAjN)Cdb4)L!Nw~U2OSi?|<($)E$I3FOW849N?$9JA96zMleGu zd^#06{o7x-2vteNT|nc;;WP;8=>2$+omUk2={(NQs4lNVXa-GEQ^=GMzt3vjj>CSG zO);8BMSvbqo0yI!9w3gfFc#Vz+q@lDX9iuYrkQqMv3*qf?96muDe6PZ<~&D9Wo4c# zR<04A+0Ad3t|5+eZYDa7iXT9CtB(TW_)kQ&2>CtYeo5)!8 zr0M3iVlaX>3CL*Gi1>uOzzNl){UH16QXN&ks^J}dKoPkOidXiD~|D)kKw5)bDdvC9P3 zll@6Ye%b6TWZOP%j{Vb$C{*@J3eGJ_xEssjW8Z>ZTnlSaTpRiw?)GNks#`}V0hHpx zE8I?-MJA*O0fPZ$J^x}y1;h`pCPn?R$^tu;H6SPWXc)5|nbi*`tJqQBX*p;a6^cPJ z3aa1B0=*1CMt9X(YwGrSpkp>)5V7wd28{TWi$#4HF1WoULERI5(bL|^+&=d7%yA zM6%79wXvVz>Y=M*SxZ2RN~tZfkH{$^# z@0w%QUJ1-uJiG`n=*N6Ed6(XTu!g}&ar}$#pVmr0K2S>8a%wkn!n?!+5J^a!Jz(BS ztNj2wSFZAEH(^i`YG1`iw9LrCfSz$63sD+Uuu{gSXJ6e33D=`yJN<=~i1=7^GXwxd;!#s2v$ z-*_3;N?+E<>_ZKW*Dxh7+tJ1QCXu?Jv=JsWHc&=HjfN@OIZOx&!6m6Yvi}qC$BtlR zQaOUpnC@&NRmcUvh~WG7!anp{MVe;s^pxxLK#!w?=;dVKTa{nJMA5i-siHCuDyd@j zGLT$sm_nwbY!FHofhfzcfP04^u6pCzKjQ7v z+P^RUw^;sOEq@!){{XhA+KHubRWWzg{SRFT0$tni8GW)7U$V{7u)t&Afk3E%+kxQ9 zMmwxMOy1rIOpE%-Cmscz@v)DVyu6+qKVuz)hJ#!0DkZM z^xZyx)m}7IsS3@Q4c1@(00I%7$d}Z?!ib*+a0LiUV3K`x&bHY{RLhJeO5{L<9s%SS z&SV<_Sd0ox_zsj%m8>s1RA3M5rk$237 zzZ?a56)^U`yEOX4OeC%xTF-kSYX6ws0idsb)(-4DZ1TKwMVUmm(vf2^bkiDAU@3sw z)4wzt)6ji#1!4f@~ur)G2s9J&ZmrBeFle%Ea*nx>zrKmGHUM{$u~B`Fp~ oI&tc%T?QP7@U`*)UaKX(2<041aRBme*a literal 0 HcmV?d00001 diff --git a/corbench/docs/deployment.md b/corbench/docs/deployment.md new file mode 100644 index 0000000..fd5b0ef --- /dev/null +++ b/corbench/docs/deployment.md @@ -0,0 +1,417 @@ +# Getting Started + +[Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). 
+ +## Deploying Corbench (For Contributors) + +You should already have an aws account set up. All these steps will be using your aws account. +Before starting, it would be helpful to have some sort of note taking system to keep track of everything as there is a LOT of things to remember! + +1. **Create IAM User Security Credentials**: + - These are the steps for creating [security credentials](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html) on AWS. + 1. Make an IAM User + - Go to the IAM Dashboard + - Under **IAM Resources** click the number under **Users**. This should take you to the IAM Users page + - Click Create User and give it any name you want. Click next + - Under Permissions options click the **Attach Policies Directly** option + - Add the following policies: + - `AmazonEC2FullAccess` + - `AmazonEBSCSIDriverPolicy` + - `IAMFullAccess` + - `AmazonEKS_CNI_Policy` + - `AmazonEKSWorkerNodePolicy` + - `IAMFullAccess` + - Click next + - Click Create user + - Go to the user that you created in the IAM Users page then under permissions policies, click **Add permissions** and click **Create inline policy** + - In Policy Editor, select **JSON** then paste in the following: + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "eks:*", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeSecurityGroups", + "ec2:CreateSecurityGroup", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:RevokeSecurityGroupEgress", + "ec2:CreateTags", + "ec2:DeleteTags", + "iam:GetRole", + "iam:ListRoles", + "iam:PassRole", + "iam:ListInstanceProfiles", + "cloudformation:*" + ], + "Resource": "*" + } + ] + } + ``` + - click next, then name the policy anything you want. Finally, click create policy + 2. 
Create the credentials + - Go back to the IAM Users page and click on the User you just created + - Take note of the user ARN as this will be used in a later step + - In the **Summary** tab on the right, click **Create access key**. this should be under Access Key 1 + - Click under use case choose **Other** then click next + - Add a description tag describing the purpose of this access key and where it will be used (Cortex Benchmarking Tool Deployment) then click **Create Access Key** + - Note Down the **Access Key** and **Secret Access Key** values. They will be used in an auth YAML file + + - Copy [auth_file.yaml.template](/auth_file.yaml.template) and rename it to auth_file.yaml in the root directory and fill in your actual AWS credentials using the keys you just created. The format should look something like this: + + ```yaml + accesskeyid: + secretaccesskey: + ``` + +2. **Create Public Subnets**: + - Set up a [VPC](https://docs.aws.amazon.com/eks/latest/userguide/create-public-private-vpc.html) with public subnets. Steps are below (you may already have premade vpcs in your region, you can use those as well): + 1. Go to the VPC Dashboard in your aws account + 2. Click **Create VPC** and make sure that **VPC and more** is selected + 3. Under **Number of Availability Zones** choose 3 + 4. Under **Number of private subnets** choose 0 + 5. Leave everything else the same and click **Create VPC** + 6. After successful creation, click **View VPC** or go to your VPC you just created + 7. Under **Resource Map** you should see 3 subnets. For every subnet, hover over the subnet and a link icon should appear on the right of the subnet icon. Click it to go to the subnet details + 8. Under Details, you should see the **Subnet ID**. Note down all 3 subnet ids as you will need them later. + 9. 
For every subnet, click the **actions** button on the right, and select **Edit subnet settings** from the dropdown, then under **Auto-assign IP settings**, check the box that says **Enable auto-assign public IPv4 address**. Make sure to do this for every subnet + +3. **Create IAM Roles**: + - **EKS Cluster Role**: Create an [Amazon EKS cluster role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) with the following policy: + - `AmazonEKSclusterPolicy` + - `AmazonEBSCSIDriverPolicy` + 1. Go to the IAM Dashboard + 2. Under **IAM Resources** click the number under **Roles**. This should take you to the IAM Roles page + 3. Click **Create Role** + 4. Keep Trusted entity type as AWS Service, and under the Service or use case dropdown select **EKS**. + 5. More options should pop up. Select EKS - Cluster + 6. Click Next + 7. Click Next again + 8. Name the role anything you want (perhaps CorbenchClusterRole), and click **Create role** + 9. Go back to the IAM Roles page, and click on the role you just created + 10. Under Summary, there should be the role ARN. Keep this value noted as you will need it later. + 11. Under Permissions policies click Add Permissions, then Attach Policies + 12. 
Select `AmazonEBSCSIDriverPolicy` and click **Add Permissions** + + - **EKS Worker Node Role**: Create an Amazon EKS worker node role with the following policies: + - `AmazonEKSWorkerNodePolicy` + - `AmazonEKS_CNI_Policy` + - `AmazonEC2ContainerRegistryReadOnly` + - `AmazonEBSCSIDriverPolicy` + - `AmazonSSMManagedInstanceCore` + - A custom inline policy with the following JSON: + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:AttachVolume", + "ec2:DetachVolume", + "ec2:DescribeVolumes", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:CreateSnapshot", + "ec2:DeleteSnapshot", + "ec2:DescribeTags", + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Resource": "*" + } + ] + } + ``` + 1. Go to the IAM Dashboard + 2. Under **IAM Resources** click the number under **Roles**. This should take you to the IAM Roles page + 3. Click **Create Role** + 4. Keep Trusted entity type as AWS Service, and under the Service or use case dropdown select **EC2**. + 6. Click Next + 7. Select the following policies: + - `AmazonEKSWorkerNodePolicy` + - `AmazonEKS_CNI_Policy` + - `AmazonEBSCSIDriverPolicy` + - `AmazonSSMManagedInstanceCore` + - `AmazonEC2FullAccess` + - `AmazonEC2ContainerRegistryReadOnly` + 8. click + 9. Name the role anything you want (perhaps CorbenchWorkerNodeRole), and click **Create role** + 10. Go back to the IAM Roles page, and click on the role you just created + 11. Under Summary, there should be the role ARN. Keep this value noted as you will need it later. + +4. **Set Environment Variables and Deploy the Cluster**: + + Before running any make commands locally, make sure to build the infra tool in the /infra directory for your computer. 
+   From the root directory:
+   ```bash
+   cd corbench
+   cd infra
+   go build -o infra infra.go
+   ```
+
+   More details for building infra for docker upload (non-local) can be found in [corbench/infra/README.md](../infra/README.md)
+
+
+   Finally you can put all the setup and noted values to use! Inject these env variables into your terminal
+   From the root directory:
+   ```bash
+   cd corbench
+   export AUTH_FILE=../auth_file.yaml
+   export CLUSTER_NAME=corbench
+   export ZONE=
+   export EKS_CLUSTER_ROLE_ARN=
+   export EKS_WORKER_ROLE_ARN=
+   export SEPARATOR=,
+   export EKS_SUBNET_IDS=,,
+   ```
+
+   ```bash
+   make cluster_create
+   ```
+
+   After this, your cluster should start to deploy! This should take around 10 minutes. You can check the status in your EKS account, as well as the CLI that will spit out status updates.
+
+   If something fails, or you forgot to do something, you can run the following command to delete the cluster (this command takes around 10 minutes):
+
+   ```bash
+   make cluster_delete
+   ```
+
+   then you can retry by running `make cluster_create` again.
+
+
+### 2. Deploy Main Node Pods & Integrate with GitHub Repo
+
+---
+
+> **Note**: These components are responsible for collecting, monitoring, and displaying test results and logs, as well as monitoring for GitHub comments
+
+1. **GitHub Integration 1/2**:
+   - First generate a GitHub auth token:
+     - Login with the [Corbench github account](https://github.com/corbench) and generate a [new auth token](https://github.com/settings/tokens).
+       Steps:
+       1. After logging into the account, go to settings, then click on **Developer Settings**. This should be near the bottom of the left hand side options.
+       2. Click on **Personal access tokens** then choose **Tokens (classic)**
+       3. Click **Generate new token** then from the dropdown options select **Generate new token (classic)**
+       4. Under **Note** add a general description of what the access token is for (Cortex Benchmark Integration)
+       5.
Select your token expiry period to **No expiration** or choose a time frame, but keep in mind you have to keep updating and redeploying the tool every time the token expires if you decide to choose an expiry date.
+       6. Select the following scopes: `public_repo`, `read:org`, `write:discussion`
+       7. Scroll to the bottom and click **Generate token**
+       8. Take note of the token as you will need it for GitHub integration
+
+2. **Main Node Pods Deployment**:
+   Now we are ready to deploy the comment monitor, Prometheus, and Grafana pods in the main node.
+   Export the following env variables:
+   ```bash
+   export GRAFANA_ADMIN_PASSWORD=password
+   export DOMAIN_NAME=corbench.cortexproject.io
+   export OAUTH_TOKEN=
+   export WH_SECRET=
+   export GITHUB_ORG=cortexproject
+   export GITHUB_REPO=cortex
+   export SERVICEACCOUNT_CLIENT_EMAIL=
+   ```
+   Assuming you have the ENV variables exported from the **Set Environment Variables and Deploy the Cluster** step as well,
+
+   From root directory:
+   ```bash
+   cd corbench
+   make cluster_resource_apply
+   ```
+   This command should take less than 5 minutes. If something went wrong and you would like to try again, you can run `make cluster_delete` and redeploy the cluster with `make cluster_create`, then run `make cluster_resource_apply` again
+
+   In the output, an ingress IP will be displayed. Note this down as this will be the entrypoint for the resources on this cluster. It should look something like this for example: http://a8adb2fbc32dc4bad8857e009581d6d2-1038785616.us-west-2.elb.amazonaws.com:80.
+
+   You can access the services at:
+   - Grafana: `http:///grafana`
+   - Prometheus: `http:///prometheus-meta`
+
+   Note that in the [comment monitor config](../c-manifests/cluster-infra/5a_commentmonitor_configmap_noparse.yaml), the links to these services are hard coded.
You must manually input the links from the new ingress IP that you just acquired, and run
+   ```bash
+   kubectl apply -f
+   kubectl rollout restart deployment/comment-monitor
+   ```
+   to update the comment monitor deployment. In the future, you can replace `` with `{{ index . "DOMAIN_NAME" }}` in the [comment monitor config](../c-manifests/cluster-infra/5a_commentmonitor_configmap_noparse.yaml) when we purchase a domain name we can use. After getting a domain name we would need to set the `A record` for `` to point to the `nginx-ingress-controller` IP address that we just noted down.
+
+   Optional: At this point, you can try to run a benchmark test locally as a test to see if it works. Refer to the **Starting a Benchmark test locally** section near the bottom of this doc.
+
+3. **GitHub Integration 2/2**
+   We are now ready to set up full integration with the Cortex repo.
+
+   1. Go to the Cortex repo settings
+   2. On the left navigation column, click on **Webhooks**
+   3. Click **Add Webhook**
+   4. In Payload URL enter `/hook` using the ingress IP you noted in the previous step (for example, `http://a8adb2fbc32dc4bad8857e009581d6d2-1038785616.us-west-2.elb.amazonaws.com:80/hook`)
+   5. Change **Content Type** to `application/json`
+   6. Under **Secret**, enter in the webhook secret you created and passed as an ENV variable in step 2. **Main Node Pods Deployment**.
+   7. Under **Which events would you like to trigger this webhook?** click **Let me select individual events.**
+   8. Uncheck every box except for the **Issue comments** box.
+   9. Click **Add webhook**
+
+   The webhook should be set up! To give it a test, create a PR and type in /corbench and a response should show up. Next we will set up the GitHub workflow to enable benchmark testing.
+
+   Next, we need to add repository secrets. Steps are below:
+   1. Go to Cortex repo settings
+   2. On the left nav bar, click on **Secrets and Variables**, then from the options that drop down, click on **Actions**
+   3.
Click **New Repository Secret** + 4. Name the secret `EKS_CLUSTER_ROLE_ARN` and put in your `` as the value. then click **add secret** + 5. Repeat the steps above for `EKS_WORKER_ROLE_ARN` putting in the value `` and also for `EKS_SUBNET_IDS` putting in the value `,,` (these are the same as the enviroment variables you exported in previous steps) + 6. Again make a secret named `TEST_INFRA_PROVIDER_AUTH` but with the base64 encoded value of your `auth_file.yaml` file. Instructions on how to do this are below. + + The secret `TEST_INFRA_PROVIDER_AUTH` is special, it needs to be base64 encoded. You should have filled out your auth_file.yaml with your credentials, so from the root directory, run: + + ```bash + base64 -i auth_file.yaml + ``` + + Copy the output, then make a repository secret with the name `TEST_INFRA_PROVIDER_AUTH` and the value you just copied from the output. + + + In the cortex repo, in the `.github/workflows/` directory, add the following yml file and name it corbench.yml (or whatever you want): + ```yaml + on: + repository_dispatch: + types: [corbench_start, corbench_stop] + name: Corbench Workflow + permissions: + contents: read + env: + AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }} + CLUSTER_NAME: corbench + DOMAIN_NAME: corbench.cortex.io + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_ORG: cortexproject + GITHUB_REPO: cortex + GITHUB_STATUS_TARGET_URL: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}} + LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }} + PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }} + RELEASE: ${{ github.event.client_payload.CORTEX_TAG }} + ZONE: us-west-2 + EKS_WORKER_ROLE_ARN: ${{ secrets.EKS_WORKER_ROLE_ARN }} + EKS_CLUSTER_ROLE_ARN: ${{ secrets.EKS_CLUSTER_ROLE_ARN }} + EKS_SUBNET_IDS: ${{ secrets.EKS_SUBNET_IDS }} + SEPARATOR: "," + jobs: + benchmark_start: + name: Corbench Start + if: github.event.action == 'corbench_start' + runs-on: ubuntu-latest + steps: + - 
name: Update status to pending + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"pending", "context": "corbench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Run make deploy to start corbench + id: make_deploy + uses: docker://corbench/corbench:latest + with: + args: >- + until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done; + make deploy; + - name: Update status to failure + if: failure() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"failure", "context": "corbench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to success + if: success() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"success", "context": "corbench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + benchmark_cancel: + name: Corbench Cancel + if: github.event.action == 'corbench_stop' + runs-on: ubuntu-latest + steps: + - name: Update status to pending + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"pending", "context": "corbench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Run make clean to stop corbench + id: make_clean + uses: docker://corbench/corbench:latest + with: + args: >- + until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done; + make clean; + - name: Update 
status to failure + if: failure() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"failure", "context": "corbench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to success + if: success() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"success", "context": "corbench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + ``` + + After this change is merged to the upstream branch of the repo, the benchmarking tool should be completely set up!. Give it a try by typing /corbench and following the instructions to start a benchmark test. + + Finally you're done! Congrats on making it to the end of this super long deployment doc and happy benchmarking! + + +## Starting a Benchmark test locally + +### 1. Start a Benchmarking Test Manually + If you are making changes to benchmark tests, or if you just want to test if benchmark tests can be ran, or if you just want to mess around, you can run them by following the steps below. +--- + +1. **Set the Environment Variables**: + + Assuming you have all above mentioned env variables already exported in previous steps, + + ```bash + export RELEASE= + export PR_NUMBER= + ``` + +2. **Start the test** + +--- + + Run the following command (you should be aware that you need to run make comands in the /corbench directory by now): + + ```bash + make deploy + ``` + + Now you can check the grafana dashboards you set up earlier in the deployment to see the results after deployment is finished! + +### 2. 
Stopping and cleaning up Benchmarking Test + +--- + + Assuming you have previous enviroment variables exported, run + + ```bash + make clean + ``` + diff --git a/corbench/gp2-csi-storageclass.yaml b/corbench/gp2-csi-storageclass.yaml new file mode 100644 index 0000000..322adf8 --- /dev/null +++ b/corbench/gp2-csi-storageclass.yaml @@ -0,0 +1,10 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gp2-csi +provisioner: ebs.csi.aws.com +parameters: + type: gp2 + fsType: ext4 +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true \ No newline at end of file diff --git a/corbench/infra/Dockerfile b/corbench/infra/Dockerfile new file mode 100644 index 0000000..dc84ae4 --- /dev/null +++ b/corbench/infra/Dockerfile @@ -0,0 +1,18 @@ +# Build stage +FROM golang:1.15-alpine AS builder + +RUN apk add --no-cache git make + +WORKDIR /src +COPY . . + +RUN go build -o infra + +# Runtime stage +FROM alpine:latest + +RUN apk add --no-cache ca-certificates + +COPY --from=builder /src/infra /bin/infra + +ENTRYPOINT ["/bin/infra"] diff --git a/corbench/infra/README.md b/corbench/infra/README.md new file mode 100644 index 0000000..d4f66ee --- /dev/null +++ b/corbench/infra/README.md @@ -0,0 +1,98 @@ +# infra: CLI Tool for Managing Kubernetes Clusters + +`infra` is a CLI tool designed to create, scale, and delete Kubernetes clusters and deploy manifest files. + +## Building Infra Binary IMPORTANT + +If you want to build and deploy the corbench docker image, you MUST first build the infra.go binary by going to the /infra directory then running: + +```bash +GOOS=linux GOARCH=amd64 go build -o infra infra.go +``` + +The architecture must be linux/amd64 as this is what the docker image runs on. +We are building it outside of the docker image to avoid having to download many imports that are required to build this infra tool every time this image is pulled. If using the infra tool locally, just run go build without specifying architecture. 
+ +## Table of Contents + +1. [Parsing of Files](#parsing-of-files) +2. [Usage and Examples](#usage-and-examples) + - [General Flags](#general-flags) + - [Commands](#commands) + - [EKS Commands](#eks-commands) +3. [Building Docker Image](#building-docker-image) + +## Parsing of Files + +Files passed to `infra` will be parsed using Go templates. To skip parsing and load the file as is, use the `noparse` suffix. + +- **Parsed File**: `somefile.yaml` +- **Non-Parsed File**: `somefile_noparse.yaml` + +## Usage and Examples + +### General Flags + +```txt +usage: infra [] [ ...] + +The prometheus/test-infra deployment tool + +Flags: + -h, --help Show context-sensitive help (also try --help-long and --help-man). + -f, --file=FILE ... YAML file or folder describing the parameters for the object that will be deployed. + -v, --vars=VARS ... Substitutes the token holders in the YAML file. Follows standard Go template formatting (e.g., {{ .hashStable }}). +``` + +### Commands + +#### EKS Commands + +- **eks info** + ```bash + eks info -v hashStable:COMMIT1 -v hashTesting:COMMIT2 + ``` + +- **eks cluster create** + ```bash + eks cluster create -a credentials -f FileOrFolder + ``` + +- **eks cluster delete** + ```bash + eks cluster delete -a credentials -f FileOrFolder + ``` + +- **eks nodes create** + ```bash + eks nodes create -a authFile -f FileOrFolder -v ZONE:eu-west-1 -v CLUSTER_NAME:test \ + -v EKS_SUBNET_IDS:subnetId1,subnetId2,subnetId3 + ``` + +- **eks nodes delete** + ```bash + eks nodes delete -a authFile -f FileOrFolder -v ZONE:eu-west-1 -v CLUSTER_NAME:test \ + -v EKS_SUBNET_IDS:subnetId1,subnetId2,subnetId3 + ``` + +- **eks nodes check-running** + ```bash + eks nodes check-running -a credentials -f FileOrFolder -v ZONE:eu-west-1 -v CLUSTER_NAME:test \ + -v EKS_SUBNET_IDS:subnetId1,subnetId2,subnetId3 + ``` + +- **eks nodes check-deleted** + ```bash + eks nodes check-deleted -a authFile -f FileOrFolder -v ZONE:eu-west-1 -v CLUSTER_NAME:test \ + -v 
EKS_SUBNET_IDS:subnetId1,subnetId2,subnetId3 + ``` + +- **eks resource apply** + ```bash + eks resource apply -a credentials -f manifestsFileOrFolder -v hashStable:COMMIT1 -v hashTesting:COMMIT2 + ``` + +- **eks resource delete** + ```bash + eks resource delete -a credentials -f manifestsFileOrFolder -v hashStable:COMMIT1 -v hashTesting:COMMIT2 + ``` diff --git a/corbench/infra/infra.go b/corbench/infra/infra.go new file mode 100644 index 0000000..2903eff --- /dev/null +++ b/corbench/infra/infra.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main // import "github.com/prometheus/test-infra/infra" + +import ( + "fmt" + "log" + "os" + "path/filepath" + + "gopkg.in/alecthomas/kingpin.v2" + + "github.com/cortexproject/test-infra/corbench/pkg/provider" + "github.com/cortexproject/test-infra/corbench/pkg/provider/eks" +) + +func main() { + log.SetFlags(log.Ltime | log.Lshortfile) + + dr := provider.NewDeploymentResource() + + app := kingpin.New(filepath.Base(os.Args[0]), "The prometheus/test-infra deployment tool") + app.HelpFlag.Short('h') + app.Flag("file", "yaml file or folder that describes the parameters for the object that will be deployed."). + Short('f'). + ExistingFilesOrDirsVar(&dr.DeploymentFiles) + app.Flag("vars", "When provided it will substitute the token holders in the yaml file. Follows the standard golang template formating - {{ .hashStable }}."). + Short('v'). 
+ StringMapVar(&dr.FlagDeploymentVars) + + // EKS based commands + e := eks.New(dr) + k8sEKS := app.Command("eks", "Amazon Elastic Kubernetes Service - https://aws.amazon.com/eks"). + Action(e.SetupDeploymentResources) + k8sEKS.Flag("auth", "filename which consist eks credentials."). + PlaceHolder("credentials"). + Short('a'). + StringVar(&e.Auth) + + k8sEKS.Command("info", "eks info -v hashStable:COMMIT1 -v hashTesting:COMMIT2"). + Action(e.GetDeploymentVars) + + // EKS Cluster operations + k8sEKSCluster := k8sEKS.Command("cluster", "manage EKS clusters"). + Action(e.NewEKSClient). + Action(e.EKSDeploymentParse) + k8sEKSCluster.Command("create", "eks cluster create -a credentials -f FileOrFolder"). + Action(e.ClusterCreate) + k8sEKSCluster.Command("delete", "eks cluster delete -a credentials -f FileOrFolder"). + Action(e.ClusterDelete) + + // Cluster node-pool operations + k8sEKSNodeGroup := k8sEKS.Command("nodes", "manage EKS clusters nodegroups"). + Action(e.NewEKSClient). + Action(e.EKSDeploymentParse) + k8sEKSNodeGroup.Command("create", "eks nodes create -a authFile -f FileOrFolder -v ZONE:eu-west-1 -v CLUSTER_NAME:test -v EKS_SUBNET_IDS: subnetId1,subnetId2,subnetId3"). + Action(e.NodeGroupCreate) + k8sEKSNodeGroup.Command("delete", "eks nodes delete -a authFile -f FileOrFolder -v ZONE:eu-west-1 -v CLUSTER_NAME:test -v EKS_SUBNET_IDS: subnetId1,subnetId2,subnetId3"). + Action(e.NodeGroupDelete) + k8sEKSNodeGroup.Command("check-running", "eks nodes check-running -a credentials -f FileOrFolder -v ZONE:eu-west-1 -v CLUSTER_NAME:test -v EKS_SUBNET_IDS: subnetId1,subnetId2,subnetId3"). + Action(e.AllNodeGroupsRunning) + k8sEKSNodeGroup.Command("check-deleted", "eks nodes check-deleted -a authFile -f FileOrFolder -v ZONE:eu-west-1 -v CLUSTER_NAME:test -v EKS_SUBNET_IDS: subnetId1,subnetId2,subnetId3"). + Action(e.AllNodeGroupsDeleted) + + // K8s resource operations. 
+ k8sEKSResource := k8sEKS.Command("resource", `Apply and delete different k8s resources - deployments, services, config maps etc.Required variables -v ZONE:us-east-2 -v CLUSTER_NAME:test `). + Action(e.NewEKSClient). + Action(e.K8SDeploymentsParse). + Action(e.NewK8sProvider) + k8sEKSResource.Command("apply", "eks resource apply -a credentials -f manifestsFileOrFolder -v hashStable:COMMIT1 -v hashTesting:COMMIT2"). + Action(e.ResourceApply) + k8sEKSResource.Command("delete", "eks resource delete -a credentials -f manifestsFileOrFolder -v hashStable:COMMIT1 -v hashTesting:COMMIT2"). + Action(e.ResourceDelete) + + if _, err := app.Parse(os.Args[1:]); err != nil { + fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err)) + app.Usage(os.Args[1:]) + os.Exit(2) + } +} diff --git a/corbench/pkg/provider/eks/eks.go b/corbench/pkg/provider/eks/eks.go new file mode 100644 index 0000000..c3e2aed --- /dev/null +++ b/corbench/pkg/provider/eks/eks.go @@ -0,0 +1,589 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package eks
+
+import (
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"log"
+	"os"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	awsSession "github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/eks"
+	"gopkg.in/alecthomas/kingpin.v2"
+	yamlGo "gopkg.in/yaml.v2"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes/scheme"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	awsToken "sigs.k8s.io/aws-iam-authenticator/pkg/token"
+
+	"github.com/cortexproject/test-infra/corbench/pkg/provider"
+	k8sProvider "github.com/cortexproject/test-infra/corbench/pkg/provider/k8s"
+)
+
+// Resource is an alias for the generic provider resource (a parsed
+// deployment file: filename plus rendered content bytes).
+type Resource = provider.Resource
+
+// eksCluster mirrors the shape of a cluster deployment YAML file:
+// one CreateCluster request plus the nodegroup requests to attach to it.
+type eksCluster struct {
+	Cluster    eks.CreateClusterInput
+	NodeGroups []eks.CreateNodegroupInput
+}
+
+// EKS holds the fields used to generate an API request.
+type EKS struct {
+	// Auth is the raw credentials data: a file path, base64 blob, or YAML text
+	// (NewEKSClient normalizes it to the YAML credential values).
+	Auth string
+
+	ClusterName string
+	// The eks client used when performing EKS requests.
+	clientEKS *eks.EKS
+	// The aws session used in abstraction of aws credentials.
+	sessionAWS *awsSession.Session
+	// The k8s provider used when we work with the manifest files.
+	k8sProvider *k8sProvider.K8s
+	// Final DeploymentFiles files.
+	DeploymentFiles []string
+	// Final DeploymentVars.
+	DeploymentVars map[string]string
+	// DeployResource to construct DeploymentVars and DeploymentFiles
+	DeploymentResource *provider.DeploymentResource
+	// Content bytes after parsing the template variables, grouped by filename.
+	eksResources []Resource
+	// K8s resource.runtime objects after parsing the template variables, grouped by filename.
+	k8sResources []k8sProvider.Resource
+
+	ctx context.Context
+}
+
+// New is the EKS constructor.
+func New(dr *provider.DeploymentResource) *EKS {
+	eks := &EKS{
+		DeploymentResource: dr,
+	}
+	return eks
+}
+
+// NewEKSClient sets the EKS client used when performing the EKS requests.
+func (c *EKS) NewEKSClient(*kingpin.ParseContext) error {
+	// Resolve the credential source: the --auth flag wins; otherwise fall
+	// back to the AWS_APPLICATION_CREDENTIALS environment variable.
+	// (Replaces the previous empty-branch `if c.Auth != "" {} else if ...`.)
+	if c.Auth == "" {
+		if c.Auth = os.Getenv("AWS_APPLICATION_CREDENTIALS"); c.Auth == "" {
+			return errors.New("no auth provided set the auth flag or the AWS_APPLICATION_CREDENTIALS env variable")
+		}
+	}
+
+	// When the auth variable points to a file
+	// put the file content in the variable.
+	if content, err := os.ReadFile(c.Auth); err == nil {
+		c.Auth = string(content)
+	}
+
+	// Check if auth data is base64 encoded and decode it.
+	encoded, err := regexp.MatchString("^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$", c.Auth)
+	if err != nil {
+		return err
+	}
+	if encoded {
+		auth, err := base64.StdEncoding.DecodeString(c.Auth)
+		if err != nil {
+			return fmt.Errorf("could not decode auth data: %w", err)
+		}
+		c.Auth = string(auth)
+	}
+
+	// The credentials are a YAML document (accesskeyid/secretaccesskey).
+	credValue := &credentials.Value{}
+	if err = yamlGo.UnmarshalStrict([]byte(c.Auth), credValue); err != nil {
+		return fmt.Errorf("could not get credential values: %w", err)
+	}
+
+	// ZONE doubles as the AWS region for the session.
+	awsSess := awsSession.Must(awsSession.NewSession(&aws.Config{
+		Credentials: credentials.NewStaticCredentialsFromCreds(*credValue),
+		Region:      aws.String(c.DeploymentVars["ZONE"]),
+	}))
+
+	c.sessionAWS = awsSess
+	c.clientEKS = eks.New(awsSess)
+	c.ctx = context.Background()
+	return nil
+}
+
+// checkDeploymentVarsAndFiles checks whether the required deployment vars are passed.
+func (c *EKS) checkDeploymentVarsAndFiles() error {
+	// Both ZONE and CLUSTER_NAME must be present, and at least one
+	// deployment file must have been supplied.
+	for _, name := range []string{"ZONE", "CLUSTER_NAME"} {
+		if c.DeploymentVars[name] == "" {
+			return fmt.Errorf("missing required %v variable", name)
+		}
+	}
+	if len(c.DeploymentFiles) == 0 {
+		return fmt.Errorf("missing deployment file(s)")
+	}
+	return nil
+}
+
+// SetupDeploymentResources Sets up DeploymentVars and DeploymentFiles
+func (c *EKS) SetupDeploymentResources(*kingpin.ParseContext) error {
+	// Copy the file list verbatim; merge default vars with CLI-provided vars
+	// (flag values take precedence inside MergeDeploymentVars).
+	c.DeploymentFiles = c.DeploymentResource.DeploymentFiles
+	c.DeploymentVars = provider.MergeDeploymentVars(
+		c.DeploymentResource.DefaultDeploymentVars,
+		c.DeploymentResource.FlagDeploymentVars,
+	)
+	return nil
+}
+
+// EKSDeploymentParse parses the cluster/nodegroups deployment file and saves the result as bytes grouped by the filename.
+// Any variables passed to the cli will be replaced in the resource files following the golang text template format.
+func (c *EKS) EKSDeploymentParse(*kingpin.ParseContext) error {
+	if err := c.checkDeploymentVarsAndFiles(); err != nil {
+		return err
+	}
+
+	parsed, err := provider.DeploymentsParse(c.DeploymentFiles, c.DeploymentVars)
+	if err != nil {
+		return fmt.Errorf("Couldn't parse deployment files: %w", err)
+	}
+	c.eksResources = parsed
+	return nil
+}
+
+// K8SDeploymentsParse parses the k8s objects deployment files and saves the result as k8s objects grouped by the filename.
+// Any variables passed to the cli will be replaced in the resources files following the golang text template format.
+func (c *EKS) K8SDeploymentsParse(*kingpin.ParseContext) error {
+	if err := c.checkDeploymentVarsAndFiles(); err != nil {
+		return err
+	}
+
+	deploymentResource, err := provider.DeploymentsParse(c.DeploymentFiles, c.DeploymentVars)
+	if err != nil {
+		return fmt.Errorf("Couldn't parse deployment files: %w", err)
+	}
+
+	for _, deployment := range deploymentResource {
+		decode := scheme.Codecs.UniversalDeserializer().Decode
+		k8sObjects := make([]runtime.Object, 0)
+
+		// Manifests may contain several YAML documents; split on the
+		// provider separator and decode each non-empty section.
+		for _, text := range strings.Split(string(deployment.Content), provider.Separator) {
+			text = strings.TrimSpace(text)
+			if len(text) == 0 {
+				continue
+			}
+
+			resource, _, err := decode([]byte(text), nil, nil)
+			if err != nil {
+				// Quote only a bounded prefix of the section in the error:
+				// an unconditional text[:100] would panic (slice out of
+				// range) on sections shorter than 100 bytes.
+				preview := text
+				if len(preview) > 100 {
+					preview = preview[:100]
+				}
+				return fmt.Errorf("decoding the resource file:%v, section:%v...: %w", deployment.FileName, preview, err)
+			}
+			if resource == nil {
+				continue
+			}
+			k8sObjects = append(k8sObjects, resource)
+		}
+		if len(k8sObjects) > 0 {
+			c.k8sResources = append(c.k8sResources, k8sProvider.Resource{FileName: deployment.FileName, Objects: k8sObjects})
+		}
+	}
+	return nil
+}
+
+// ClusterCreate creates a new cluster or applies changes to an existing cluster.
+func (c *EKS) ClusterCreate(*kingpin.ParseContext) error {
+	req := &eksCluster{}
+	for _, deployment := range c.eksResources {
+		if err := yamlGo.UnmarshalStrict(deployment.Content, req); err != nil {
+			return fmt.Errorf("Error parsing the cluster deployment file %s: %w", deployment.FileName, err)
+		}
+
+		log.Printf("Cluster create request: name:'%s'", *req.Cluster.Name)
+		_, err := c.clientEKS.CreateCluster(&req.Cluster)
+		if err != nil {
+			return fmt.Errorf("Couldn't create cluster '%v', file:%v ,err: %w", *req.Cluster.Name, deployment.FileName, err)
+		}
+
+		// Block until the cluster is ACTIVE; nodegroups cannot be created
+		// against a cluster that is still provisioning.
+		err = provider.RetryUntilTrue(
+			fmt.Sprintf("creating cluster:%v", *req.Cluster.Name),
+			provider.EKSRetryCount,
+			func() (bool, error) { return c.clusterRunning(*req.Cluster.Name) },
+		)
+		if err != nil {
+			return fmt.Errorf("creating cluster err: %w", err)
+		}
+
+		for _, nodegroupReq := range req.NodeGroups {
+			nodegroupReq.ClusterName = req.Cluster.Name
+			log.Printf("Nodegroup create request: NodeGroupName: '%s', ClusterName: '%s'", *nodegroupReq.NodegroupName, *req.Cluster.Name)
+			_, err := c.clientEKS.CreateNodegroup(&nodegroupReq)
+			if err != nil {
+				// Dereference the *string fields so the message shows the
+				// names rather than pointer addresses.
+				return fmt.Errorf("Couldn't create nodegroup '%v' for cluster '%v', file:%v ,err: %w", *nodegroupReq.NodegroupName, *req.Cluster.Name, deployment.FileName, err)
+			}
+
+			err = provider.RetryUntilTrue(
+				fmt.Sprintf("creating nodegroup:%s for cluster:%s", *nodegroupReq.NodegroupName, *req.Cluster.Name),
+				provider.EKSRetryCount,
+				func() (bool, error) { return c.nodeGroupCreated(*nodegroupReq.NodegroupName, *req.Cluster.Name) },
+			)
+			if err != nil {
+				return fmt.Errorf("creating nodegroup err: %w", err)
+			}
+		}
+	}
+	return nil
+}
+
+// ClusterDelete deletes a eks Cluster
+func (c *EKS) ClusterDelete(*kingpin.ParseContext) error {
+	req := &eksCluster{}
+	for _, deployment := range c.eksResources {
+		if err := yamlGo.UnmarshalStrict(deployment.Content, req); err != nil {
+			return fmt.Errorf("Error parsing the cluster deployment file %s: %w", deployment.FileName, err)
+		}
+
+		// To delete a cluster we have to manually delete all of its
+		// nodegroups first.
+		log.Printf("Removing all nodepools for '%s'", *req.Cluster.Name)
+
+		// Listing all nodepools for cluster
+		reqL := &eks.ListNodegroupsInput{
+			ClusterName: req.Cluster.Name,
+		}
+
+		for {
+			resL, err := c.clientEKS.ListNodegroups(reqL)
+			if err != nil {
+				return fmt.Errorf("listing nodepools err: %w", err)
+			}
+
+			for _, nodegroup := range resL.Nodegroups {
+				log.Printf("Removing nodepool '%s' in cluster '%s'", *nodegroup, *req.Cluster.Name)
+
+				reqD := eks.DeleteNodegroupInput{
+					ClusterName:   req.Cluster.Name,
+					NodegroupName: nodegroup,
+				}
+				_, err := c.clientEKS.DeleteNodegroup(&reqD)
+				if err != nil {
+					// This is the delete path; the previous message wrongly
+					// said "create".
+					return fmt.Errorf("Couldn't delete nodegroup '%v' for cluster '%v', err: %w", *nodegroup, *req.Cluster.Name, err)
+				}
+
+				err = provider.RetryUntilTrue(
+					fmt.Sprintf("deleting nodegroup:%v for cluster:%v", *nodegroup, *req.Cluster.Name),
+					provider.GlobalRetryCount,
+					func() (bool, error) { return c.nodeGroupDeleted(*nodegroup, *req.Cluster.Name) },
+				)
+				if err != nil {
+					return fmt.Errorf("deleting nodegroup err: %w", err)
+				}
+			}
+
+			// Paginate until ListNodegroups reports no further pages.
+			if resL.NextToken == nil {
+				break
+			}
+			reqL.NextToken = resL.NextToken
+		}
+
+		reqD := &eks.DeleteClusterInput{
+			Name: req.Cluster.Name,
+		}
+
+		log.Printf("Removing cluster '%v'", *reqD.Name)
+		_, err := c.clientEKS.DeleteCluster(reqD)
+		if err != nil {
+			return fmt.Errorf("Couldn't delete cluster '%v', file:%v ,err: %w", *req.Cluster.Name, deployment.FileName, err)
+		}
+
+		err = provider.RetryUntilTrue(
+			fmt.Sprintf("deleting cluster:%v", *reqD.Name),
+			provider.GlobalRetryCount,
+			func() (bool, error) { return c.clusterDeleted(*reqD.Name) })
+		if err != nil {
+			return fmt.Errorf("removing cluster err: %w", err)
+		}
+	}
+	return nil
+}
+
+// clusterRunning checks whether a cluster is in an active state.
+func (c *EKS) clusterRunning(name string) (bool, error) { + req := &eks.DescribeClusterInput{ + Name: aws.String(name), + } + clusterRes, err := c.clientEKS.DescribeCluster(req) + if err != nil { + var aerr awserr.Error + if errors.As(err, &aerr) && aerr.Code() == eks.ErrCodeNotFoundException { + return false, nil + } + return false, fmt.Errorf("Couldn't get cluster status: %w", err) + } + if *clusterRes.Cluster.Status == eks.ClusterStatusFailed { + return false, fmt.Errorf("Cluster not in a status to become ready - %s", *clusterRes.Cluster.Status) + } + if *clusterRes.Cluster.Status == eks.ClusterStatusActive { + return true, nil + } + log.Printf("Cluster '%v' status: %v", name, *clusterRes.Cluster.Status) + return false, nil +} + +func (c *EKS) clusterDeleted(name string) (bool, error) { + req := &eks.DescribeClusterInput{ + Name: aws.String(name), + } + clusterRes, err := c.clientEKS.DescribeCluster(req) + if err != nil { + var aerr awserr.Error + if errors.As(err, &aerr) && aerr.Code() == eks.ErrCodeResourceNotFoundException { + return true, nil + } + return false, fmt.Errorf("Couldn't get cluster status: %w", err) + } + + log.Printf("Cluster '%v' status: %v", name, *clusterRes.Cluster.Status) + return false, nil +} + +// NodeGroupCreate creates a new k8s nodegroup in an existing cluster. 
+func (c *EKS) NodeGroupCreate(*kingpin.ParseContext) error { + req := &eksCluster{} + for _, deployment := range c.eksResources { + if err := yamlGo.UnmarshalStrict(deployment.Content, req); err != nil { + return fmt.Errorf("Error parsing the cluster deployment file %s: %w", deployment.FileName, err) + } + + for _, nodegroupReq := range req.NodeGroups { + nodegroupReq.ClusterName = req.Cluster.Name + log.Printf("Nodegroup create request: NodeGroupName: '%s', ClusterName: '%s'", *nodegroupReq.NodegroupName, *req.Cluster.Name) + _, err := c.clientEKS.CreateNodegroup(&nodegroupReq) + if err != nil { + return fmt.Errorf("Couldn't create nodegroup '%s' for cluster '%s', file:%v ,err: %w", *nodegroupReq.NodegroupName, *req.Cluster.Name, deployment.FileName, err) + } + + err = provider.RetryUntilTrue( + fmt.Sprintf("creating nodegroup:%s for cluster:%s", *nodegroupReq.NodegroupName, *req.Cluster.Name), + provider.GlobalRetryCount, + func() (bool, error) { return c.nodeGroupCreated(*nodegroupReq.NodegroupName, *req.Cluster.Name) }, + ) + if err != nil { + return fmt.Errorf("creating nodegroup err: %w", err) + } + } + } + return nil +} + +// NodeGroupDelete deletes a k8s nodegroup in an existing cluster +func (c *EKS) NodeGroupDelete(*kingpin.ParseContext) error { + req := &eksCluster{} + for _, deployment := range c.eksResources { + if err := yamlGo.UnmarshalStrict(deployment.Content, req); err != nil { + return fmt.Errorf("Error parsing the cluster deployment file %s: %w", deployment.FileName, err) + } + + for _, nodegroupReq := range req.NodeGroups { + nodegroupReq.ClusterName = req.Cluster.Name + log.Printf("Nodegroup delete request: NodeGroupName: '%s', ClusterName: '%s'", *nodegroupReq.NodegroupName, *req.Cluster.Name) + reqD := eks.DeleteNodegroupInput{ + ClusterName: req.Cluster.Name, + NodegroupName: nodegroupReq.NodegroupName, + } + _, err := c.clientEKS.DeleteNodegroup(&reqD) + if err != nil { + return fmt.Errorf("Couldn't delete nodegroup '%s' for cluster '%s, 
file:%v ,err: %w", *nodegroupReq.NodegroupName, *req.Cluster.Name, deployment.FileName, err) + } + err = provider.RetryUntilTrue( + fmt.Sprintf("deleting nodegroup:%s for cluster:%s", *nodegroupReq.NodegroupName, *req.Cluster.Name), + provider.GlobalRetryCount, + func() (bool, error) { return c.nodeGroupDeleted(*nodegroupReq.NodegroupName, *req.Cluster.Name) }, + ) + if err != nil { + return fmt.Errorf("deleting nodegroup err: %w", err) + } + } + } + return nil +} + +func (c *EKS) nodeGroupCreated(nodegroupName, clusterName string) (bool, error) { + req := &eks.DescribeNodegroupInput{ + ClusterName: aws.String(clusterName), + NodegroupName: aws.String(nodegroupName), + } + nodegroupRes, err := c.clientEKS.DescribeNodegroup(req) + if err != nil { + var aerr awserr.Error + if errors.As(err, &aerr) && aerr.Code() == eks.ErrCodeNotFoundException { + return false, nil + } + return false, fmt.Errorf("Couldn't get nodegroupname status: %w", err) + } + if *nodegroupRes.Nodegroup.Status == eks.NodegroupStatusActive { + return true, nil + } + + log.Printf("Nodegroup '%v' for Cluster '%v' status: %v", nodegroupName, clusterName, *nodegroupRes.Nodegroup.Status) + return false, nil +} + +func (c *EKS) nodeGroupDeleted(nodegroupName, clusterName string) (bool, error) { + req := &eks.DescribeNodegroupInput{ + ClusterName: aws.String(clusterName), + NodegroupName: aws.String(nodegroupName), + } + nodegroupRes, err := c.clientEKS.DescribeNodegroup(req) + if err != nil { + var aerr awserr.Error + if errors.As(err, &aerr) && aerr.Code() == eks.ErrCodeResourceNotFoundException { + return true, nil + } + return false, fmt.Errorf("Couldn't get nodegroupname status: %w", err) + } + + log.Printf("Nodegroup '%v' for Cluster '%v' status: %v", nodegroupName, clusterName, *nodegroupRes.Nodegroup.Status) + return false, nil +} + +// AllNodeGroupsRunning returns an error if at least one node pool is not running +func (c *EKS) AllNodeGroupsRunning(*kingpin.ParseContext) error { + req := 
&eksCluster{} + for _, deployment := range c.eksResources { + if err := yamlGo.UnmarshalStrict(deployment.Content, req); err != nil { + return fmt.Errorf("Error parsing the cluster deployment file %s: %w", deployment.FileName, err) + } + for _, nodegroup := range req.NodeGroups { + isRunning, err := c.nodeGroupCreated(*nodegroup.NodegroupName, *req.Cluster.Name) + if err != nil { + return fmt.Errorf("error fetching nodegroup info") + } + if !isRunning { + return fmt.Errorf("nodepool not running name: %v", *nodegroup.NodegroupName) + } + } + } + return nil +} + +// AllNodeGroupsDeleted returns an error if at least one node pool is not deleted +func (c *EKS) AllNodeGroupsDeleted(*kingpin.ParseContext) error { + req := &eksCluster{} + for _, deployment := range c.eksResources { + if err := yamlGo.UnmarshalStrict(deployment.Content, req); err != nil { + return fmt.Errorf("Error parsing the cluster deployment file %s: %w", deployment.FileName, err) + } + for _, nodegroup := range req.NodeGroups { + isRunning, err := c.nodeGroupDeleted(*nodegroup.NodegroupName, *req.Cluster.Name) + if err != nil { + return fmt.Errorf("error fetching nodegroup info") + } + if !isRunning { + return fmt.Errorf("nodepool not running name: %v", *nodegroup.NodegroupName) + } + } + } + return nil +} + +// EKSK8sToken returns aws iam authenticator token which is used to access eks k8s cluster from outside. 
+func (c *EKS) EKSK8sToken(clusterName, _ string) awsToken.Token { + gen, err := awsToken.NewGenerator(true, false) + if err != nil { + log.Fatalf("Token abstraction error: %v", err) + } + + opts := &awsToken.GetTokenOptions{ + ClusterID: clusterName, + Session: c.sessionAWS, + } + + tok, err := gen.GetWithOptions(opts) + if err != nil { + log.Fatalf("Token abstraction error: %v", err) + } + + return tok +} + +// NewK8sProvider sets the k8s provider used for deploying k8s manifests +func (c *EKS) NewK8sProvider(*kingpin.ParseContext) error { + clusterName := c.DeploymentVars["CLUSTER_NAME"] + region := c.DeploymentVars["ZONE"] + + req := &eks.DescribeClusterInput{ + Name: &clusterName, + } + + rep, err := c.clientEKS.DescribeCluster(req) + if err != nil { + return fmt.Errorf("failed to get cluster details: %w", err) + } + + arnRole := *rep.Cluster.Arn + + caCert, err := base64.StdEncoding.DecodeString(*rep.Cluster.CertificateAuthority.Data) + if err != nil { + return fmt.Errorf("failed to decode certificate: %w", err) + } + + cluster := clientcmdapi.NewCluster() + cluster.CertificateAuthorityData = []byte(caCert) + cluster.Server = *rep.Cluster.Endpoint + + clusterContext := clientcmdapi.NewContext() + clusterContext.Cluster = arnRole + clusterContext.AuthInfo = arnRole + + authInfo := clientcmdapi.NewAuthInfo() + authInfo.Token = c.EKSK8sToken(clusterName, region).Token + + config := clientcmdapi.NewConfig() + config.AuthInfos[arnRole] = authInfo + config.Contexts[arnRole] = clusterContext + config.Clusters[arnRole] = cluster + config.CurrentContext = arnRole + config.Kind = "Config" + config.APIVersion = "v1" + + c.k8sProvider, err = k8sProvider.New(c.ctx, config) + if err != nil { + return fmt.Errorf("k8s provider error %w", err) + } + + return nil +} + +// ResourceApply calls k8s.ResourceApply to apply the k8s objects in the manifest files. 
func (c *EKS) ResourceApply(*kingpin.ParseContext) error {
	// Thin wrapper: delegates to the k8s provider set up by NewK8sProvider.
	if err := c.k8sProvider.ResourceApply(c.k8sResources); err != nil {
		return fmt.Errorf("error while applying a resource err: %w", err)
	}
	return nil
}

// ResourceDelete calls k8s.ResourceDelete to apply the k8s objects in the manifest files.
func (c *EKS) ResourceDelete(*kingpin.ParseContext) error {
	// Thin wrapper: delegates to the k8s provider set up by NewK8sProvider.
	if err := c.k8sProvider.ResourceDelete(c.k8sResources); err != nil {
		return fmt.Errorf("error while deleting objects from a manifest file err: %w", err)
	}
	return nil
}

// GetDeploymentVars shows deployment variables.
func (c *EKS) GetDeploymentVars(*kingpin.ParseContext) error {
	// Plain-text dump to stdout, one "key : value" line per variable.
	fmt.Print("-------------------\n DeploymentVars \n------------------- \n")
	for key, value := range c.DeploymentVars {
		fmt.Println(key, " : ", value)
	}

	return nil
}
diff --git a/corbench/pkg/provider/k8s/k8s.go b/corbench/pkg/provider/k8s/k8s.go
new file mode 100644
index 0000000..3f81cd1
--- /dev/null
+++ b/corbench/pkg/provider/k8s/k8s.go
@@ -0,0 +1,1557 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8s

import (
	"context"
	"fmt"
	"log"
	"strings"

	"gopkg.in/alecthomas/kingpin.v2"
	admissionregistration "k8s.io/api/admissionregistration/v1"
	appsV1 "k8s.io/api/apps/v1"
	batchV1 "k8s.io/api/batch/v1"
	apiCoreV1 "k8s.io/api/core/v1"
	apiNetworkingV1 "k8s.io/api/networking/v1"
	rbac "k8s.io/api/rbac/v1"
	apiServerExtensionsV1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	apiServerExtensionsClient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	apiErrors "k8s.io/apimachinery/pkg/api/errors"
	apiMetaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/util/retry"
	// Blank import for side effect: registers the GCP client auth plugin.
	_ "k8s.io/cloud-provider-gcp/pkg/clientauthplugin/gcp"

	"github.com/cortexproject/test-infra/corbench/pkg/provider"
)

// init registers the apiextensions v1beta1 types with the global client-go
// scheme so the universal deserializer used in DeploymentsParse can decode
// CustomResourceDefinition manifests.
func init() {
	if err := apiServerExtensionsV1beta1.AddToScheme(scheme.Scheme); err != nil {
		log.Fatal("apiServerExtensionsV1beta1.AddToScheme err:", err)
	}
}

// Resource holds the resource objects after parsing deployment files.
type Resource struct {
	// FileName is the manifest file the objects were parsed from.
	FileName string
	// Objects are the decoded k8s objects from that file.
	Objects []runtime.Object
}

// K8s holds the fields used to generate API request from within a cluster.
type K8s struct {
	// clt serves the built-in resource kinds (core, apps, rbac, ...).
	clt *kubernetes.Clientset
	// ApiExtClient serves the apiextensions group (CustomResourceDefinitions).
	ApiExtClient *apiServerExtensionsClient.Clientset
	// DeploymentFiles files provided from the cli.
	DeploymentFiles []string
	// Variables to substitute in the DeploymentFiles.
	// These are also used when the command requires some variables that are not provided by the deployment file.
	DeploymentVars map[string]string
	// K8s resource.runtime objects after parsing the template variables, grouped by filename.
	resources []Resource

	// ctx is passed to every API call issued by this client.
	ctx context.Context
}

// New returns a k8s client that can apply and delete resources.
+func New(ctx context.Context, config *clientcmdapi.Config) (*K8s, error) { + var restConfig *rest.Config + var err error + if config == nil { + restConfig, err = rest.InClusterConfig() + } else { + restConfig, err = clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig() + } + if err != nil { + return nil, fmt.Errorf("k8s config error: %w", err) + } + + clientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("k8s client error: %w", err) + } + + apiExtClientset, err := apiServerExtensionsClient.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("k8s api extensions client error: %w", err) + } + + return &K8s{ + ctx: ctx, + clt: clientset, + ApiExtClient: apiExtClientset, + DeploymentVars: make(map[string]string), + }, nil +} + +// GetResources is a getter function for Resources field in K8s. +func (c *K8s) GetResources() []Resource { + return c.resources +} + +// DeploymentsParse parses the k8s objects deployment files and saves the result as k8s objects grouped by the filename. +// Any variables passed to the cli will be replaced in the resources files following the golang text template format. 
+func (c *K8s) DeploymentsParse(*kingpin.ParseContext) error { + deploymentResource, err := provider.DeploymentsParse(c.DeploymentFiles, c.DeploymentVars) + if err != nil { + log.Fatalf("Couldn't parse deployment files: %v", err) + } + + for _, deployment := range deploymentResource { + decode := scheme.Codecs.UniversalDeserializer().Decode + k8sObjects := make([]runtime.Object, 0) + + for _, text := range strings.Split(string(deployment.Content), provider.Separator) { + text = strings.TrimSpace(text) + if len(text) == 0 { + continue + } + + resource, _, err := decode([]byte(text), nil, nil) + if err != nil { + return fmt.Errorf("decoding the resource file:%v, section:%v...: %w", deployment.FileName, text[:100], err) + } + if resource == nil { + continue + } + k8sObjects = append(k8sObjects, resource) + } + if len(k8sObjects) > 0 { + c.resources = append(c.resources, Resource{FileName: deployment.FileName, Objects: k8sObjects}) + } + } + return nil +} + +// ResourceApply applies k8s objects. +// The input is a slice of structs containing the filename and the slice of k8s objects present in the file. 
+func (c *K8s) ResourceApply(deployments []Resource) error { + var err error + for _, deployment := range deployments { + for _, resource := range deployment.Objects { + switch kind := strings.ToLower(resource.GetObjectKind().GroupVersionKind().Kind); kind { + case "clusterrole": + err = c.clusterRoleApply(resource) + case "clusterrolebinding": + err = c.clusterRoleBindingApply(resource) + case "configmap": + err = c.configMapApply(resource) + case "daemonset": + err = c.daemonSetApply(resource) + case "deployment": + err = c.deploymentApply(resource) + case "ingress": + err = c.ingressApply(resource) + case "namespace": + err = c.nameSpaceApply(resource) + case "role": + err = c.roleApply(resource) + case "rolebinding": + err = c.roleBindingApply(resource) + case "service": + err = c.serviceApply(resource) + case "serviceaccount": + err = c.serviceAccountApply(resource) + case "secret": + err = c.secretApply(resource) + case "persistentvolumeclaim": + err = c.persistentVolumeClaimApply(resource) + case "customresourcedefinition": + err = c.customResourceApply(resource) + case "statefulset": + err = c.statefulSetApply(resource) + case "job": + err = c.jobApply(resource) + case "validatingwebhookconfiguration": + err = c.validatingWebhookConfigurationApply(resource) + case "ingressclass": + err = c.ingressClassApply(resource) + default: + err = fmt.Errorf("creating request for unimplimented resource type:%v", kind) + } + if err != nil { + return fmt.Errorf("error applying '%v' err: %w", deployment.FileName, err) + } + } + } + return nil +} + +// ResourceDelete deletes k8s objects. +// The input is a slice of structs containing the filename and the slice of k8s objects present in the file. 
+func (c *K8s) ResourceDelete(deployments []Resource) error { + var err error + for _, deployment := range deployments { + for _, resource := range deployment.Objects { + switch kind := strings.ToLower(resource.GetObjectKind().GroupVersionKind().Kind); kind { + case "clusterrole": + err = c.clusterRoleDelete(resource) + case "clusterrolebinding": + err = c.clusterRoleBindingDelete(resource) + case "configmap": + err = c.configMapDelete(resource) + case "daemonset": + err = c.daemonsetDelete(resource) + case "deployment": + err = c.deploymentDelete(resource) + case "ingress": + err = c.ingressDelete(resource) + case "namespace": + err = c.namespaceDelete(resource) + case "role": + err = c.roleDelete(resource) + case "rolebinding": + err = c.roleBindingDelete(resource) + case "service": + err = c.serviceDelete(resource) + case "serviceaccount": + err = c.serviceAccountDelete(resource) + case "secret": + err = c.secretDelete(resource) + case "persistentvolumeclaim": + err = c.persistentVolumeClaimDelete(resource) + case "customresourcedefinition": + err = c.customResourceDelete(resource) + case "statefulset": + err = c.statefulSetDelete(resource) + case "job": + err = c.jobDelete(resource) + case "validatingwebhookconfiguration": + err = c.validatingWebhookConfigurationDelete(resource) + case "ingressclass": + err = c.ingressClassDelete(resource) + default: + err = fmt.Errorf("deleting request for unimplimented resource type:%v", kind) + } + if err != nil { + return fmt.Errorf("error deleting '%v' err: %w", deployment.FileName, err) + } + } + } + return nil +} + +// Functions to create different K8s objects. 
// clusterRoleApply creates or updates a ClusterRole (rbac/v1).
// NOTE(review): all appliers below establish existence by listing every
// object of the kind and scanning by name; a Get + IsNotFound check would
// avoid the full List — confirm before changing, since it affects RBAC
// permissions required (list vs get).
func (c *K8s) clusterRoleApply(resource runtime.Object) error {
	req := resource.(*rbac.ClusterRole)
	kind := resource.GetObjectKind().GroupVersionKind().Kind

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.RbacV1().ClusterRoles()

		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("listing resource : %v: %w", kind, err)
		}

		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			// Update under conflict retry (409s are retried by client-go).
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		} else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
			return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
		}
		log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
		return nil
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}
}

// clusterRoleBindingApply creates or updates a ClusterRoleBinding (rbac/v1).
func (c *K8s) clusterRoleBindingApply(resource runtime.Object) error {
	req := resource.(*rbac.ClusterRoleBinding)
	kind := resource.GetObjectKind().GroupVersionKind().Kind

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.RbacV1().ClusterRoleBindings()
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}

		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		} else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
			return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
		}
		log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}
	return nil
}

// configMapApply creates or updates a ConfigMap; an empty namespace
// defaults to "default".
func (c *K8s) configMapApply(resource runtime.Object) error {
	req := resource.(*apiCoreV1.ConfigMap)
	kind := resource.GetObjectKind().GroupVersionKind().Kind
	if len(req.Namespace) == 0 {
		req.Namespace = "default"
	}

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":

		client := c.clt.CoreV1().ConfigMaps(req.Namespace)

		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}

		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		} else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
			return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
		}
		log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}
	return nil
}

// daemonSetApply creates or updates a DaemonSet, then checks readiness via
// daemonsetReady. Unlike deploymentApply, the readiness check here is not
// wrapped in a retry loop.
func (c *K8s) daemonSetApply(resource runtime.Object) error {
	req := resource.(*appsV1.DaemonSet)
	kind := resource.GetObjectKind().GroupVersionKind().Kind
	if len(req.Namespace) == 0 {
		req.Namespace = "default"
	}

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.AppsV1().DaemonSets(req.Namespace)
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}

		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		} else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
			return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
		}
		log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}
	return c.daemonsetReady(resource)
}

// deploymentApply creates or updates a Deployment and then blocks until
// deploymentReady reports it ready (bounded by provider.GlobalRetryCount).
func (c *K8s) deploymentApply(resource runtime.Object) error {
	req := resource.(*appsV1.Deployment)
	kind := resource.GetObjectKind().GroupVersionKind().Kind
	if len(req.Namespace) == 0 {
		req.Namespace = "default"
	}

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.AppsV1().Deployments(req.Namespace)
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}

		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
		} else {
			if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
				return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
		}
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}
	return provider.RetryUntilTrue(
		fmt.Sprintf("applying deployment:%v", req.Name),
		provider.GlobalRetryCount,
		func() (bool, error) { return c.deploymentReady(resource) })
}

// statefulSetApply creates or updates a StatefulSet and then blocks until
// statefulSetReady reports it ready (bounded by provider.GlobalRetryCount).
func (c *K8s) statefulSetApply(resource runtime.Object) error {
	req := resource.(*appsV1.StatefulSet)
	kind := resource.GetObjectKind().GroupVersionKind().Kind
	if len(req.Namespace) == 0 {
		req.Namespace = "default"
	}

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.AppsV1().StatefulSets(req.Namespace)
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}
		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
		} else {
			if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
				return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
		}
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}

	return provider.RetryUntilTrue(
		fmt.Sprintf("applying statefulSet:%v", req.Name),
		provider.GlobalRetryCount,
		func() (bool, error) { return c.statefulSetReady(resource) })
}

// jobApply creates or updates a Job and waits for completion via jobReady
// with an effectively unbounded retry count (1<<31 - 1).
func (c *K8s) jobApply(resource runtime.Object) error {
	req := resource.(*batchV1.Job)
	kind := resource.GetObjectKind().GroupVersionKind().Kind
	if len(req.Namespace) == 0 {
		req.Namespace = "default"
	}

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.BatchV1().Jobs(req.Namespace)
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}
		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		} else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
			return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
		}
		log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}
	// Max int32: retry "forever" until the job finishes.
	const Infinite int = 1<<31 - 1
	return provider.RetryUntilTrue(
		fmt.Sprintf("running job:%v", req.Name),
		Infinite,
		func() (bool, error) { return c.jobReady(resource) })
}

// validatingWebhookConfigurationApply creates or updates a
// ValidatingWebhookConfiguration. When updating, the live object's
// ResourceVersion is copied into the request if unset — presumably to
// satisfy optimistic-concurrency checks on Update (confirm).
func (c *K8s) validatingWebhookConfigurationApply(resource runtime.Object) error {
	req := resource.(*admissionregistration.ValidatingWebhookConfiguration)
	kind := resource.GetObjectKind().GroupVersionKind().Kind

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.AdmissionregistrationV1().ValidatingWebhookConfigurations()
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}
		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				if req.ResourceVersion == "" {
					req.ResourceVersion = l.ResourceVersion
				}
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		} else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
			return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
		}
		log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}
	return nil
}

// customResourceApply creates or updates a CustomResourceDefinition using
// the apiextensions v1beta1 client. The namespace defaulting below is a
// no-op for the request itself: CRDs are cluster-scoped and the client is
// not namespaced.
func (c *K8s) customResourceApply(resource runtime.Object) error {
	req := resource.(*apiServerExtensionsV1beta1.CustomResourceDefinition)
	kind := resource.GetObjectKind().GroupVersionKind().Kind
	if len(req.Namespace) == 0 {
		req.Namespace = "default"
	}

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1beta1":
		client := c.ApiExtClient.ApiextensionsV1beta1().CustomResourceDefinitions()
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}
		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}
		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		} else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
			return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
		}
		log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}

	return nil
}

// ingressApply creates or updates an Ingress (networking.k8s.io/v1); an
// empty namespace defaults to "default".
func (c *K8s) ingressApply(resource runtime.Object) error {
	req := resource.(*apiNetworkingV1.Ingress)
	kind := resource.GetObjectKind().GroupVersionKind().Kind
	if len(req.Namespace) == 0 {
		req.Namespace = "default"
	}

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.NetworkingV1().Ingresses(req.Namespace)
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}

		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		} else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
			return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
		}
		log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}
	return nil
}

// ingressClassApply creates or updates an IngressClass. Written in
// early-return style (type assertion checked, version guard up front)
// rather than the switch used by its siblings.
func (c *K8s) ingressClassApply(resource runtime.Object) error {
	req, ok := resource.(*apiNetworkingV1.IngressClass)
	if !ok {
		return fmt.Errorf("expected IngressClass, but got %T", resource)
	}
	kind := resource.GetObjectKind().GroupVersionKind().Kind

	if resource.GetObjectKind().GroupVersionKind().Version != "v1" {
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'",
			resource.GetObjectKind().GroupVersionKind().Version, kind, req.Name)
	}

	client := c.clt.NetworkingV1().IngressClasses()
	list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
	if err != nil {
		return fmt.Errorf("error listing resource: %v, name: %v: %w", kind, req.Name, err)
	}

	for _, l := range list.Items {
		if l.Name == req.Name {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		}
	}

	if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
		return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
	}
	log.Printf("resource created - kind: %v, name: %v", kind, req.Name)
	return nil
}

// nameSpaceApply creates or updates a Namespace (cluster-scoped).
func (c *K8s) nameSpaceApply(resource runtime.Object) error {
	req := resource.(*apiCoreV1.Namespace)
	kind := resource.GetObjectKind().GroupVersionKind().Kind

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.CoreV1().Namespaces()
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}

		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated - kind: %v, name: %v", kind, req.Name)
			return nil
		} else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil {
			return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err)
		}
		log.Printf("resource created - kind: %v, name: %v", kind, req.Name)

	default:
		return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name)
	}
	return nil
}

// roleApply creates or updates a namespaced Role (rbac/v1).
// (Definition continues beyond this chunk.)
func (c *K8s) roleApply(resource runtime.Object) error {
	req := resource.(*rbac.Role)
	kind := resource.GetObjectKind().GroupVersionKind().Kind
	if len(req.Namespace) == 0 {
		req.Namespace = "default"
	}

	switch v := resource.GetObjectKind().GroupVersionKind().Version; v {
	case "v1":
		client := c.clt.RbacV1().Roles(req.Namespace)
		list, err := client.List(c.ctx, apiMetaV1.ListOptions{})
		if err != nil {
			return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err)
		}

		var exists bool
		for _, l := range list.Items {
			if l.Name == req.Name {
				exists = true
				break
			}
		}

		if exists {
			if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				_, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{})
				return err
			}); err != nil {
				return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err)
			}
			log.Printf("resource updated
- kind: %v, name: %v", kind, req.Name) + return nil + } else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil { + return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource created - kind: %v, name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) roleBindingApply(resource runtime.Object) error { + req := resource.(*rbac.RoleBinding) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.RbacV1().RoleBindings(req.Namespace) + list, err := client.List(c.ctx, apiMetaV1.ListOptions{}) + if err != nil { + return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err) + } + + var exists bool + for _, l := range list.Items { + if l.Name == req.Name { + exists = true + break + } + } + + if exists { + if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + _, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{}) + return err + }); err != nil { + return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource updated - kind: %v, name: %v", kind, req.Name) + return nil + } else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil { + return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource created - kind: %v, name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) serviceAccountApply(resource runtime.Object) error { + req := resource.(*apiCoreV1.ServiceAccount) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if 
len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().ServiceAccounts(req.Namespace) + list, err := client.List(c.ctx, apiMetaV1.ListOptions{}) + if err != nil { + return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err) + } + + var exists bool + for _, l := range list.Items { + if l.Name == req.Name { + exists = true + break + } + } + + if exists { + if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + _, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{}) + return err + }); err != nil { + return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource updated - kind: %v, name: %v", kind, req.Name) + return nil + } else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil { + return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource created - kind: %v, name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) serviceApply(resource runtime.Object) error { + req := resource.(*apiCoreV1.Service) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().Services(req.Namespace) + list, err := client.List(c.ctx, apiMetaV1.ListOptions{}) + if err != nil { + return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err) + } + + var exists bool + for _, l := range list.Items { + if l.Name == req.Name { + exists = true + // Immutable fields must be set when updating. 
+ // See https://github.com/kubernetes/kubernetes/pull/66602 + if req.ResourceVersion == "" { + req.ResourceVersion = l.ResourceVersion + } + if req.Spec.ClusterIP == "" { + req.Spec.ClusterIP = l.Spec.ClusterIP + } + break + } + } + + if exists { + if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + _, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{}) + return err + }); err != nil { + return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource updated - kind: %v, name: %v", kind, req.Name) + return nil + } else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil { + return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource created - kind: %v, name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + + return provider.RetryUntilTrue( + fmt.Sprintf("applying service:%v", req.Name), + provider.GlobalRetryCount, + func() (bool, error) { return c.serviceExists(resource) }) +} + +func (c *K8s) secretApply(resource runtime.Object) error { + req := resource.(*apiCoreV1.Secret) + kind := req.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().Secrets(req.Namespace) + list, err := client.List(c.ctx, apiMetaV1.ListOptions{}) + if err != nil { + return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err) + } + + var exists bool + for _, l := range list.Items { + if l.Name == req.Name { + exists = true + break + } + } + + if exists { + if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + _, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{}) + return err + }); err != nil { + return fmt.Errorf("resource update failed - kind: %v, 
name: %v: %w", kind, req.Name, err) + } + log.Printf("resource updated - kind: %v, name: %v", kind, req.Name) + return nil + } else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil { + return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource created - kind: %v, name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) persistentVolumeClaimApply(resource runtime.Object) error { + req := resource.(*apiCoreV1.PersistentVolumeClaim) + kind := req.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().PersistentVolumeClaims(req.Namespace) + list, err := client.List(c.ctx, apiMetaV1.ListOptions{}) + if err != nil { + return fmt.Errorf("error listing resource : %v, name: %v: %w", kind, req.Name, err) + } + + var exists bool + for _, l := range list.Items { + if l.Name == req.Name { + exists = true + break + } + } + + if exists { + if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + _, err := client.Update(c.ctx, req, apiMetaV1.UpdateOptions{}) + return err + }); err != nil { + return fmt.Errorf("resource update failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource updated - kind: %v, name: %v", kind, req.Name) + return nil + } else if _, err := client.Create(c.ctx, req, apiMetaV1.CreateOptions{}); err != nil { + return fmt.Errorf("resource creation failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource created - kind: %v, name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +// Functions to delete different K8s objects. 
+func (c *K8s) clusterRoleDelete(resource runtime.Object) error { + req := resource.(*rbac.ClusterRole) + kind := resource.GetObjectKind().GroupVersionKind().Kind + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.RbacV1().ClusterRoles() + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) clusterRoleBindingDelete(resource runtime.Object) error { + req := resource.(*rbac.ClusterRoleBinding) + kind := resource.GetObjectKind().GroupVersionKind().Kind + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.RbacV1().ClusterRoleBindings() + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) configMapDelete(resource runtime.Object) error { + req := resource.(*apiCoreV1.ConfigMap) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().ConfigMaps(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, 
apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) daemonsetDelete(resource runtime.Object) error { + req := resource.(*appsV1.DaemonSet) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.AppsV1().DaemonSets(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) deploymentDelete(resource runtime.Object) error { + req := resource.(*appsV1.Deployment) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.AppsV1().Deployments(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} 
+ +func (c *K8s) statefulSetDelete(resource runtime.Object) error { + req := resource.(*appsV1.StatefulSet) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.AppsV1().StatefulSets(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) jobDelete(resource runtime.Object) error { + req := resource.(*batchV1.Job) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.BatchV1().Jobs(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) customResourceDelete(resource runtime.Object) error { + req := resource.(*apiServerExtensionsV1beta1.CustomResourceDefinition) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1beta1": + client := 
c.ApiExtClient.ApiextensionsV1beta1().CustomResourceDefinitions() + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + + return nil +} + +func (c *K8s) ingressDelete(resource runtime.Object) error { + req := resource.(*apiNetworkingV1.Ingress) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.NetworkingV1().Ingresses(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) ingressClassDelete(resource runtime.Object) error { + req, ok := resource.(*apiNetworkingV1.IngressClass) + if !ok { + return fmt.Errorf("expected IngressClass, but got %T", resource) + } + kind := resource.GetObjectKind().GroupVersionKind().Kind + + if resource.GetObjectKind().GroupVersionKind().Version != "v1" { + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", + resource.GetObjectKind().GroupVersionKind().Version, kind, req.Name) + } + + client := c.clt.NetworkingV1().IngressClasses() + err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("resource 
deletion failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + + log.Printf("resource deleted - kind: %v, name: %v", kind, req.Name) + return nil +} + +func (c *K8s) namespaceDelete(resource runtime.Object) error { + req := resource.(*apiCoreV1.Namespace) + kind := resource.GetObjectKind().GroupVersionKind().Kind + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().Namespaces() + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleting - kind: %v , name: %v", kind, req.Name) + return provider.RetryUntilTrue( + fmt.Sprintf("deleting namespace:%v", req.Name), + 2*provider.GlobalRetryCount, + func() (bool, error) { return c.namespaceDeleted(resource) }) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } +} + +func (c *K8s) roleDelete(resource runtime.Object) error { + req := resource.(*rbac.Role) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.RbacV1().Roles(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) roleBindingDelete(resource runtime.Object) error { + req := resource.(*rbac.RoleBinding) + kind := 
resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.RbacV1().RoleBindings(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) serviceDelete(resource runtime.Object) error { + req := resource.(*apiCoreV1.Service) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().Services(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) serviceAccountDelete(resource runtime.Object) error { + req := resource.(*apiCoreV1.ServiceAccount) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().ServiceAccounts(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, 
req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) secretDelete(resource runtime.Object) error { + req := resource.(*apiCoreV1.Secret) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().Secrets(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) persistentVolumeClaimDelete(resource runtime.Object) error { + req := resource.(*apiCoreV1.PersistentVolumeClaim) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().PersistentVolumeClaims(req.Namespace) + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, 
kind, req.Name) + } + return nil +} + +func (c *K8s) validatingWebhookConfigurationDelete(resource runtime.Object) error { + req := resource.(*admissionregistration.ValidatingWebhookConfiguration) + kind := resource.GetObjectKind().GroupVersionKind().Kind + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.AdmissionregistrationV1().ValidatingWebhookConfigurations() + delPolicy := apiMetaV1.DeletePropagationForeground + if err := client.Delete(c.ctx, req.Name, apiMetaV1.DeleteOptions{PropagationPolicy: &delPolicy}); err != nil { + return fmt.Errorf("resource delete failed - kind: %v, name: %v: %w", kind, req.Name, err) + } + log.Printf("resource deleted - kind: %v , name: %v", kind, req.Name) + default: + return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) serviceExists(resource runtime.Object) (bool, error) { + req := resource.(*apiCoreV1.Service) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().Services(req.Namespace) + res, err := client.Get(c.ctx, req.Name, apiMetaV1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("Checking Service resource status failed: %w", err) + } + if res.Spec.Type == apiCoreV1.ServiceTypeLoadBalancer { + // K8s API currently just supports LoadBalancerStatus. 
+ if len(res.Status.LoadBalancer.Ingress) > 0 { + log.Printf("\tService %s Details", req.Name) + for _, x := range res.Status.LoadBalancer.Ingress { + ingressHostAddr := "" + if len(x.IP) != 0 { + ingressHostAddr = x.IP + } else { + ingressHostAddr = x.Hostname + } + + log.Printf("\t\thttp://%s:%d", ingressHostAddr, res.Spec.Ports[0].Port) + } + return true, nil + } + return false, nil + } + // For any other type we blindly assume that it is up and running as we have no way of checking. + return true, nil + default: + return false, fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } +} + +func (c *K8s) deploymentReady(resource runtime.Object) (bool, error) { + req := resource.(*appsV1.Deployment) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.AppsV1().Deployments(req.Namespace) + + res, err := client.Get(c.ctx, req.Name, apiMetaV1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("Checking Deployment resource:'%v' status failed err: %w", req.Name, err) + } + + replicas := int32(1) + if req.Spec.Replicas != nil { + replicas = *req.Spec.Replicas + } + if res.Status.AvailableReplicas == replicas { + return true, nil + } + return false, nil + default: + return false, fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } +} + +func (c *K8s) statefulSetReady(resource runtime.Object) (bool, error) { + req := resource.(*appsV1.StatefulSet) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.AppsV1().StatefulSets(req.Namespace) + + res, err := client.Get(c.ctx, req.Name, apiMetaV1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("Checking 
StatefulSet resource:'%v' status failed err: %w", req.Name, err) + } + + replicas := int32(1) + if req.Spec.Replicas != nil { + replicas = *req.Spec.Replicas + } + if res.Status.ReadyReplicas == replicas { + return true, nil + } + return false, nil + default: + return false, fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } +} + +func (c *K8s) jobReady(resource runtime.Object) (bool, error) { + req := resource.(*batchV1.Job) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.BatchV1().Jobs(req.Namespace) + + res, err := client.Get(c.ctx, req.Name, apiMetaV1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("Checking Job resource:'%v' status failed err: %w", req.Name, err) + } + + // Current `jobReady` only works for non-parallel jobs. + // https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#parallel-jobs + count := int32(1) + if res.Status.Succeeded == count { + return true, nil + } else if res.Status.Failed == count { + return true, fmt.Errorf("Job %v has failed", req.Name) + } + + return false, nil + default: + return false, fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } +} + +func (c *K8s) daemonsetReady(resource runtime.Object) error { + req := resource.(*appsV1.DaemonSet) + kind := resource.GetObjectKind().GroupVersionKind().Kind + if len(req.Namespace) == 0 { + req.Namespace = "default" + } + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.AppsV1().DaemonSets(req.Namespace) + + res, err := client.Get(c.ctx, req.Name, apiMetaV1.GetOptions{}) + if err != nil { + return fmt.Errorf("Checking DaemonSet resource:'%v' status failed err: %w", req.Name, err) + } + if res.Status.NumberUnavailable == 0 { + return nil + } + default: + 
return fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } + return nil +} + +func (c *K8s) namespaceDeleted(resource runtime.Object) (bool, error) { + req := resource.(*apiCoreV1.Namespace) + kind := resource.GetObjectKind().GroupVersionKind().Kind + + switch v := resource.GetObjectKind().GroupVersionKind().Version; v { + case "v1": + client := c.clt.CoreV1().Namespaces() + + if _, err := client.Get(c.ctx, req.Name, apiMetaV1.GetOptions{}); err != nil { + if apiErrors.IsNotFound(err) { + return true, nil + } + return false, fmt.Errorf("Couldn't get namespace '%v' err: %w", req.Name, err) + } + return false, nil + default: + return false, fmt.Errorf("unknown object version: %v kind:'%v', name:'%v'", v, kind, req.Name) + } +} diff --git a/corbench/pkg/provider/provider.go b/corbench/pkg/provider/provider.go new file mode 100644 index 0000000..f4635e9 --- /dev/null +++ b/corbench/pkg/provider/provider.go @@ -0,0 +1,146 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package provider + +import ( + "bytes" + "fmt" + "log" + "os" + "path/filepath" + "strings" + "text/template" + "time" +) + +const ( + EKSRetryCount = 100 + GlobalRetryCount = 100 + Separator = "---" + globalRetryTime = 10 * time.Second +) + +// DeploymentResource holds list of variables and corresponding files. +type DeploymentResource struct { + // DeploymentFiles files provided from the cli. 
const (
	// EKSRetryCount is the retry budget for EKS provisioning operations.
	EKSRetryCount = 100
	// GlobalRetryCount is the default retry budget for resource checks.
	GlobalRetryCount = 100
	// Separator splits multi-document deployment files.
	Separator = "---"
	// globalRetryTime is the pause between successive retry attempts.
	globalRetryTime = 10 * time.Second
)

// DeploymentResource holds list of variables and corresponding files.
type DeploymentResource struct {
	// DeploymentFiles files provided from the cli.
	DeploymentFiles []string
	// FlagDeploymentVars are deployment variables provided from the cli.
	FlagDeploymentVars map[string]string
	// DefaultDeploymentVars are the fallback deployment variables.
	DefaultDeploymentVars map[string]string
}

// NewDeploymentResource returns DeploymentResource with default values.
func NewDeploymentResource() *DeploymentResource {
	return &DeploymentResource{
		DeploymentFiles:    []string{},
		FlagDeploymentVars: map[string]string{},
		DefaultDeploymentVars: map[string]string{
			"NGINX_SERVICE_TYPE":          "LoadBalancer",
			"LOADGEN_SCALE_UP_REPLICAS":   "10",
			"SEPARATOR":                   ",",
			"SERVICEACCOUNT_CLIENT_EMAIL": "example@example.com",
		},
	}
}

// Resource holds the file content after parsing the template variables.
type Resource struct {
	FileName string
	Content  []byte
}

// RetryUntilTrue returns when there is an error or the requested operation
// returns true. It sleeps globalRetryTime before every attempt and gives up
// after retryCount attempts.
func RetryUntilTrue(name string, retryCount int, fn func() (bool, error)) error {
	for i := 1; i <= retryCount; i++ {
		time.Sleep(globalRetryTime)
		if ready, err := fn(); err != nil {
			return err
		} else if !ready {
			log.Printf("Request for '%v' is in progress. Checking in %v", name, globalRetryTime)
			continue
		}
		log.Printf("Request for '%v' is done!", name)
		return nil
	}
	return fmt.Errorf("Request for '%v' hasn't completed after retrying %d times", name, retryCount)
}

// applyTemplateVars applies golang templates to deployment files.
// missingkey=error makes a reference to an undefined variable fail loudly.
func applyTemplateVars(content []byte, deploymentVars map[string]string) ([]byte, error) {
	fileContentParsed := bytes.NewBufferString("")
	t := template.New("resource").Option("missingkey=error")
	t = t.Funcs(template.FuncMap{
		// k8s objects can't have dots(.) so we add a custom function to allow normalising the variable values.
		"normalise": func(t string) string {
			return strings.ReplaceAll(t, ".", "-")
		},
		"split": func(rangeVars, separator string) []string {
			return strings.Split(rangeVars, separator)
		},
	})
	if err := template.Must(t.Parse(string(content))).Execute(fileContentParsed, deploymentVars); err != nil {
		return nil, fmt.Errorf("Failed to execute parse file err: %w", err)
	}
	return fileContentParsed.Bytes(), nil
}

// DeploymentsParse parses the deployment files and returns the result as bytes grouped by the filename.
// Any variables passed to the cli will be replaced in the resources files following the golang text template format.
// Directories are walked recursively for .yaml/.yml files; files whose base
// name ends in "noparse" are returned verbatim without template expansion.
func DeploymentsParse(deploymentFiles []string, deploymentVars map[string]string) ([]Resource, error) {
	var fileList []string
	for _, name := range deploymentFiles {
		if file, err := os.Stat(name); err == nil && file.IsDir() {
			if err := filepath.Walk(name, func(path string, _ os.FileInfo, walkErr error) error {
				// Propagate walk errors instead of silently skipping
				// unreadable directory entries (previously discarded).
				if walkErr != nil {
					return walkErr
				}
				if filepath.Ext(path) == ".yaml" || filepath.Ext(path) == ".yml" {
					fileList = append(fileList, path)
				}
				return nil
			}); err != nil {
				return nil, fmt.Errorf("error reading directory: %w", err)
			}
		} else {
			fileList = append(fileList, name)
		}
	}

	deploymentObjects := make([]Resource, 0, len(fileList))
	for _, name := range fileList {
		absFileName := strings.TrimSuffix(filepath.Base(name), filepath.Ext(name))
		content, err := os.ReadFile(name)
		if err != nil {
			// Return the error instead of log.Fatalf: a library function must
			// not terminate the whole process on a missing file.
			return nil, fmt.Errorf("error reading file %v: %w", name, err)
		}
		// Don't parse file with the suffix "noparse".
		if !strings.HasSuffix(absFileName, "noparse") {
			content, err = applyTemplateVars(content, deploymentVars)
			if err != nil {
				return nil, fmt.Errorf("couldn't apply template to file %s: %w", name, err)
			}
		}
		deploymentObjects = append(deploymentObjects, Resource{FileName: name, Content: content})
	}
	return deploymentObjects, nil
}
+func MergeDeploymentVars(ms ...map[string]string) map[string]string { + res := map[string]string{} + for _, m := range ms { + for k, v := range m { + res[k] = v + } + } + return res +} diff --git a/corbench/pkg/provider/provider_test.go b/corbench/pkg/provider/provider_test.go new file mode 100644 index 0000000..fc27eca --- /dev/null +++ b/corbench/pkg/provider/provider_test.go @@ -0,0 +1,59 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package provider + +import ( + "reflect" + "testing" +) + +func TestMergeDeploymentVars(t *testing.T) { + dv1 := map[string]string{ + "foo": "apple", + "bar": "orange", + } + dv2 := map[string]string{ + "foo": "mango", + "baz": "banana", + "buzz": "jackfruit", + } + dv3 := map[string]string{ + "foo": "grape", + "baz": "blueberry", + } + testCases := []struct { + vars []map[string]string + merged map[string]string + }{ + { + vars: []map[string]string{dv1, dv2, dv3}, + merged: map[string]string{"bar": "orange", "baz": "blueberry", "buzz": "jackfruit", "foo": "grape"}, + }, + { + vars: []map[string]string{dv3, dv2, dv1}, + merged: map[string]string{"bar": "orange", "baz": "banana", "buzz": "jackfruit", "foo": "apple"}, + }, + { + vars: []map[string]string{dv3, dv1, dv2}, + merged: map[string]string{"bar": "orange", "baz": "banana", "buzz": "jackfruit", "foo": "mango"}, + }, + } + + for _, tc := range testCases { + r := MergeDeploymentVars(tc.vars...) 
+ if eq := reflect.DeepEqual(tc.merged, r); !eq { + t.Errorf("\nexpect %#v\ngot %#v", tc.merged, r) + } + } +} diff --git a/corbench/setup-ebs-csi.sh b/corbench/setup-ebs-csi.sh new file mode 100755 index 0000000..fffd5e5 --- /dev/null +++ b/corbench/setup-ebs-csi.sh @@ -0,0 +1,104 @@ +#!/bin/bash +# Setup EBS CSI driver after cluster creation + +set -e + +# Disable AWS CLI pager to prevent interactive prompts +export AWS_PAGER="" + +echo "Setting up EBS CSI driver..." + +# Get AWS account ID from worker role ARN or current identity +AWS_ACCOUNT_ID=$(echo ${EKS_WORKER_ROLE_ARN} | cut -d':' -f5 2>/dev/null || aws sts get-caller-identity --query Account --output text) + +# Get current OIDC provider URL from the cluster +OIDC_PROVIDER=$(aws eks describe-cluster --name ${CLUSTER_NAME} --region ${ZONE} --query "cluster.identity.oidc.issuer" --output text | sed 's|https://||') +OIDC_ID=$(echo ${OIDC_PROVIDER} | cut -d'/' -f4) + +echo "Creating IAM role for EBS CSI driver..." + +echo "Printing OIDC provider: ${OIDC_ID}" + +# Set AWS credentials for cluster access +export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-$(grep "accesskeyid:" ../auth_file.yaml | awk '{print $2}')} +export AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-$(grep "secretaccesskey:" ../auth_file.yaml | awk '{print $2}')} + +# Connect kubectl to the cluster +aws eks update-kubeconfig --name ${CLUSTER_NAME} --region ${ZONE} + +# Create OIDC provider if it doesn't exist +echo "Creating OIDC provider..." +aws iam create-open-id-connect-provider \ + --url https://${OIDC_PROVIDER} \ + --thumbprint-list 9e99a48a9960b14926bb7f3b02e22da2b0ab7280 \ + --client-id-list sts.amazonaws.com || echo "OIDC provider already exists, continuing..." 
+ +# Create trust policy +cat > ebs-csi-trust-policy.json << EOF +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_PROVIDER}" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "${OIDC_PROVIDER}:aud": "sts.amazonaws.com", + "${OIDC_PROVIDER}:sub": "system:serviceaccount:kube-system:ebs-csi-controller-sa" + } + } + } + ] +} +EOF + +# Create IAM role (ignore error if already exists) +aws iam create-role \ + --role-name AmazonEKS_EBS_CSI_DriverRole \ + --assume-role-policy-document file://ebs-csi-trust-policy.json \ + --region ${ZONE} || echo "Role already exists, continuing..." + +# Update trust policy for existing role to ensure correct OIDC provider +aws iam update-assume-role-policy \ + --role-name AmazonEKS_EBS_CSI_DriverRole \ + --policy-document file://ebs-csi-trust-policy.json + +# Attach AWS managed policy +aws iam attach-role-policy \ + --role-name AmazonEKS_EBS_CSI_DriverRole \ + --policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \ + --region ${ZONE} + +echo "Installing EBS CSI driver addon..." + +# Install EBS CSI driver addon +aws eks create-addon \ + --cluster-name ${CLUSTER_NAME} \ + --addon-name aws-ebs-csi-driver \ + --service-account-role-arn arn:aws:iam::${AWS_ACCOUNT_ID}:role/AmazonEKS_EBS_CSI_DriverRole \ + --region ${ZONE} || echo "Addon already exists, updating..." + +# Wait for addon to be active +echo "Waiting for EBS CSI driver to be active..." +aws eks wait addon-active \ + --cluster-name ${CLUSTER_NAME} \ + --addon-name aws-ebs-csi-driver \ + --region ${ZONE} + +# Annotate service account with IAM role +echo "Annotating EBS CSI service account..." 
+kubectl annotate serviceaccount ebs-csi-controller-sa \ + -n kube-system \ + eks.amazonaws.com/role-arn=arn:aws:iam::${AWS_ACCOUNT_ID}:role/AmazonEKS_EBS_CSI_DriverRole \ + --overwrite + +# Apply storage class +kubectl apply -f gp2-csi-storageclass.yaml + +# Clean up temp files +rm -f ebs-csi-trust-policy.json + +echo "EBS CSI driver setup complete!" \ No newline at end of file diff --git a/scripts/README.md b/scripts/README.md index bf6d106..f46bbcf 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -4,6 +4,8 @@ This script updates the grafana dashboard deployment based on the grafana dashbo Simply update the grafana dashboard json, then in this /scripts directory run the following script to update the config file +From Root Directory: ```bash +cd scripts/ ./sync-corbench-dashboards.sh ``` \ No newline at end of file From 4b482097f177aa9ef34642884c7afb5d4f561bc4 Mon Sep 17 00:00:00 2001 From: Eric Kong Date: Fri, 8 Aug 2025 15:45:37 -0700 Subject: [PATCH 2/2] update some documentation Signed-off-by: Eric Kong --- corbench/docs/deployment.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/corbench/docs/deployment.md b/corbench/docs/deployment.md index fd5b0ef..f72dfcd 100644 --- a/corbench/docs/deployment.md +++ b/corbench/docs/deployment.md @@ -197,7 +197,7 @@ Before starting, it would be helpful to have some sort of note taking system to 1. **GitHub Integration 1/2**: - First generate a GitHub auth token: - - Login with the [Corbench github account](https://github.com/corbench) and generate a [new auth token](https://github.com/settings/tokens). + - Login with the [Corbench github account](https://github.com/corbench) (the credentials to this github account can be found in my handoff doc for my internship) and generate a [new auth token](https://github.com/settings/tokens). Steps: 1. After logging into the account, go to settings, then click on **Developer Settings**. This should be near the bottom of the left hand side options. 2. 
Click on **Personal access tokens** then choose **Tokens (classic)** @@ -207,6 +207,7 @@ Before starting, it would be helpful to have some sort of note taking system to 6. Select the following scopes: `public_repo`, `read:org`, `write:discussion` 7. scroll to the bottom and click **Generate token** 8. Take note of the token as you will need it for github integration + 9. Give the Corbench github account repository contributor permissions to allow it to send messages. (this can be done in the repository settings) 2. **Main Node Pods Deployment**: Now we are ready to deploy the comment monitor, Prometheus, and grafana pods in the main node.